import random


def partition(a, left_index, right_index):
    """Lomuto-style partition of a[left_index:right_index] around the pivot a[left_index]."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    """In-place quicksort of a[left:right] using a randomly chosen pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = a[left], a[pivot]  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
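# Usage sketch (added): sort a list in place without the interactive prompt.
#
#     nums = [5, 1, 4, 2, 3]
#     quick_sort_random(nums, 0, len(nums))
#     assert nums == [1, 2, 3, 4, 5]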
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
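# Usage sketch (added; keyword names reconstructed from the positional defaults above):
#
#     config = UperNetConfig(use_auxiliary_head=False)
#     assert config.to_dict()["model_type"] == "upernet"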
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy module so submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import base64


def ascii85_encode(string: str) -> bytes:
    # NOTE: the obfuscated source read `baseaa.aaaencode`; `base64.a85encode` is the most
    # plausible reconstruction, since digits in identifiers were mangled to "a".
    return base64.a85encode(string.encode("utf-8"))


def ascii85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
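# Usage sketch (added): round-trip a string through Ascii85.
#
#     encoded = ascii85_encode("Hello World!")
#     assert ascii85_decode(encoded) == "Hello World!"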
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity of two collections (sets, lists, or tuples).

    With alternative_union=True the denominator is |A| + |B| instead of |A union B|.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
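# Expected output of the demo above: |{"c", "d", "e"}| / |{"a", "b", "c", "d", "e", "f", "h", "i"}| = 3 / 8 = 0.375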
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    # Class, method, and variable names reconstructed from the upstream transformers SageMaker tests.
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
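# Usage sketch (added; the script filename and checkpoint/config paths are illustrative,
# the flags mirror the argparse definitions above):
#
#     python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./xlnet/xlnet_model.ckpt \
#         --xlnet_config_file ./xlnet/xlnet_config.json \
#         --pytorch_dump_folder_path ./xlnet_pytorch \
#         --finetuning_task sts-b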
import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


_CITATION = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"

_DESCRIPTION = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"

_KWARGS_DESCRIPTION = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."


if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)

    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)  # strict flag not recoverable from the obfuscated source
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
import shutil
import tempfile
import unittest

from transformers import (
    SPIECE_UNDERLINE,
    AddedToken,
    BatchEncoding,
    NllbTokenizer,
    NllbTokenizerFast,
    is_torch_available,
)
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145


@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # Test and variable names reconstructed from the upstream transformers NLLB tokenizer tests.
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors="pt", src_lang="eng_Latn", tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt")
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)

    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass

    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")
                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs,
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)


@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )

    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging


if is_torch_available():
    import torch

_snake_case = logging.get_logger(__name__)
logger = _snake_case


class MobileNetV2ImageProcessor(BaseImageProcessor):
    # Class name reconstructed: the defaults below match the MobileNetV2 image processor in transformers.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
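# Usage sketch (added): mean([3, 6, 9, 12, 15, 18, 21]) == 12.0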
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ',
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
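# Usage sketch (added) — the public entry point for this pipeline class; the
# label/score shown are illustrative:
#
#     from transformers import pipeline
#
#     classifier = pipeline("text-classification")
#     classifier("This movie was great!")
#     # [{'label': 'POSITIVE', 'score': 0.999...}]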
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
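# Expected output of the demo above:
#     [0, 1, 6, 15, 28]
#     [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]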
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def A ( *_lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase=True , _lowerCamelCase=2 ):
'''simple docstring'''
from .. import __version__
_lowerCAmelCase : str = take_from
_lowerCAmelCase : int = ()
if not isinstance(args[0] , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
F" version {__version__} is >= {version_name}" )
_lowerCAmelCase : Optional[int] = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
_lowerCAmelCase : Optional[Any] = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
_lowerCAmelCase : Tuple = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
_lowerCAmelCase : Union[str, Any] = F"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
_lowerCAmelCase : str = warning + " " if standard_warn else ""
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
_lowerCAmelCase : List[Any] = inspect.getouterframes(inspect.currentframe() )[1]
_lowerCAmelCase : Dict = call_frame.filename
_lowerCAmelCase : str = call_frame.lineno
_lowerCAmelCase : Union[str, Any] = call_frame.function
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
return values[0]
return values
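# Usage sketch (hypothetical names, for illustration only; this mirrors diffusers'
# `deprecate` helper, of which this function is a copy):
#   value = A(("old_kwarg", "0.20.0", "Use `new_kwarg` instead."), take_from=kwargs)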
| 658 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape ( input_array ):
    '''simple docstring'''
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes ( features , labels , classes ):
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes ( features , labels , classes ):
    '''simple docstring'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis ( features , dimensions ):
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _ , eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info("Principal Component Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def linear_discriminant_analysis ( features , labels , classes , dimensions ):
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _ , eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix , _ , _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
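# Usage sketch (illustrative data, independent of the tests below):
#   demo = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]])
#   reduced = principal_component_analysis(demo, dimensions=1)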
def test_linear_discriminant_analysis ( ):
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def test_principal_component_analysis ( ):
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
    def __init__( self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 658 |
import requests
from bs4 import BeautifulSoup
def get_citation ( base_url , params ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , "html.parser" )
    div = soup.find("div" , attrs={"class": "gs_ri"} )
    anchors = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
    return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 1 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order ( df , partition_order ):
    '''simple docstring'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F"SPARK_PARTITION_ID() = {part_id}" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((F"{part_id}_{row_idx}", row.asDict()) )
    return expected_row_ids_and_row_dicts
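# The helper collects one Spark partition at a time, which is fine for these small test frames.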
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed ( ):
'''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples ( ):
'''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable ( ):
'''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
assert row_id == F"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle ( ):
'''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator" ) as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard ( ):
'''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_1[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_2[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows ( ):
'''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 658 |
def solution ( _lowerCamelCase = 1_000_000 ):
    '''simple docstring'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , _lowerCamelCase ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
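# `counters` memoises the Collatz chain length of every value reached so far,
# so later starting numbers stop as soon as they hit a known value.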
if __name__ == "__main__":
print(solution(int(input().strip())))
| 658 | 1 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow ( arr ):
    '''simple docstring'''
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element : float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast ( arr ):
    '''simple docstring'''
    result = []
    for i, outer in enumerate(arr ):
        next_item : float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item )
    return result
def next_greatest_element ( arr ):
    '''simple docstring'''
    arr_size = len(arr )
    stack : list[float] = []
    result : list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
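# The stack-based version runs in O(n); the two nested-loop variants above are O(n^2).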
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_snake_case = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 658 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key ( key ):
    '''simple docstring'''
    if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.bias" , ".conv1d_1.bias" )
    elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.weight" , ".conv1d_1.weight" )
    elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.bias" , ".conv1d_2.bias" )
    elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.weight" , ".conv1d_2.weight" )
    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
    if "prime_prior" in key:
        key = key.replace("prime_prior" , "encoder" )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def fix_jukebox_keys ( state_dict , model_state_dict , key_prefix , mapping ):
    '''simple docstring'''
    new_dict = {}
import re
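    # The compiled patterns below each match one family of fairseq module paths
    # (VQ-VAE encoder/decoder blocks and the prior conditioner blocks).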
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle missmatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            val = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
return new_dict
@torch.no_grad()
def convert_openai_checkpoint ( model_name=None , pytorch_dump_folder_path=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
            r = requests.get(F"{PREFIX}{file}" , allow_redirects=True )
            os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=True )
            open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split("/" )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b" ):
                new_dic[k.replace("b" , "bias" )] = old_dic[k]
            elif k.endswith(".w" ):
                new_dic[k.replace("w" , "weight" )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks." , ".model." )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = "vqvae" if i == 0 else F"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_ ( state_dict ):
    '''simple docstring'''
    ignore_keys = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb ( emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
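# The returned linear layer reuses the embedding's weight tensor, i.e. classic weight tying.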
def rename_fairseq_keys ( state_dict , expert_idx=None ):
    '''simple docstring'''
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0" , F"ffn.experts.expert_{expert_idx}" )
            else:
                key = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2." , ".ffn.fc2." )
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1." , ".ffn.fc1." )
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn." , ".cross_attention." )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm" , "ff_layer_norm" )
        new_dict[key] = state_dict[old_key]
return new_dict
def shard_on_the_fly ( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name = WEIGHTS_NAME ):
    '''simple docstring'''
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + F"-rank-{expert}.pt"
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )["model"]
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace(".bin" , F"-{len(sharded_state_dicts )+1:05d}-of-???.bin" ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(".bin" , F"-{len(sharded_state_dicts )+1:05d}-of-???.bin" ) )
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
# Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(".bin" , F"-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin" )
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , F"-{idx+1:05d}-of-???.bin" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
_snake_case = parser.parse_args()
_snake_case, _snake_case = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_snake_case = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_snake_case = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 658 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel ( ksize , sigma , theta , lambd , psi , gamma ):
    '''simple docstring'''
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float32 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread("../image_data/lena.jpg")
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case = out / out.max() * 255
_snake_case = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 658 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
_snake_case = {
"google/rembert": 256,
}
_snake_case = "▁"
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = RemBertTokenizer
    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def snake_case__ ( self, token_ids_0, token_ids_1 = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def snake_case__ ( self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def snake_case__ ( self, token_ids_0, token_ids_1 = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def snake_case__ ( self, save_directory, filename_prefix = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 658 |
def binary_insertion_sort ( collection ):
    '''simple docstring'''
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
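# Binary search cuts the comparison count, but element shifting keeps the sort O(n^2) overall.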
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 658 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self, vocab_size=3_2000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1E-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs, ):
'''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
    def max_position_embeddings( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
    def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1E-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
'''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 658 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False')) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env')
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
])
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=__a, )
assert hasattr(self, "env")
    def snake_case__ ( self, instance_count=1):
        '''simple docstring'''
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", )
    def snake_case__ ( self, job_name):
        '''simple docstring'''
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
_lowerCAmelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCAmelCase : Dict = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 99_9999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, __a)
| 658 |
def combination_sum_iv ( n , array , target ):
    '''simple docstring'''
    def count_of_possible_combinations(target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array ( n , array , target ):
    '''simple docstring'''
    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up ( n , array , target ):
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
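# The bottom-up table fills in O(n * target) time using O(target) extra space.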
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 658 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
    def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True, ):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
def snake_case__ ( self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = GLPNImageProcessor if is_vision_available() else None
def snake_case__ ( self):
'''simple docstring'''
        self.image_processor_tester = GLPNImageProcessingTester(self)
@property
def snake_case__ ( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
# Test not batched input (GLPNImageProcessor doesn't support batching)
_lowerCAmelCase : Any = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def snake_case__ ( self):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
# Test not batched input (GLPNImageProcessor doesn't support batching)
_lowerCAmelCase : Any = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def snake_case__ ( self):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
# Test not batched input (GLPNImageProcessor doesn't support batching)
_lowerCAmelCase : Any = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 658 |
import string
def decrypt ( message ):
    '''simple docstring'''
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}" )
def main ( ):
    '''simple docstring'''
    message = input("Encrypted message: " )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 658 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "Hello, World!"
_snake_case = "en_XX"
def convert_xmod_checkpoint_to_pytorch ( xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    '''simple docstring'''
    data_dir = Path("data_bin" )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(data_dir ) , bpe="sentencepiece" , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
    xmod.eval() # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:" , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCAmelCase : int = xmod_sent_encoder.embed_tokens.weight
_lowerCAmelCase : Dict = xmod_sent_encoder.embed_positions.weight
_lowerCAmelCase : int = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
_lowerCAmelCase : Optional[Any] = xmod_sent_encoder.layernorm_embedding.weight
_lowerCAmelCase : Optional[Any] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCAmelCase : int = model.roberta.encoder.layer[i]
_lowerCAmelCase : int = xmod_sent_encoder.layers[i]
# self attention
_lowerCAmelCase : Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
_lowerCAmelCase : Optional[Any] = xmod_layer.self_attn.q_proj.weight
_lowerCAmelCase : Union[str, Any] = xmod_layer.self_attn.q_proj.bias
_lowerCAmelCase : Any = xmod_layer.self_attn.k_proj.weight
_lowerCAmelCase : Tuple = xmod_layer.self_attn.k_proj.bias
_lowerCAmelCase : str = xmod_layer.self_attn.v_proj.weight
_lowerCAmelCase : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
_lowerCAmelCase : str = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
_lowerCAmelCase : Optional[int] = xmod_layer.self_attn.out_proj.weight
_lowerCAmelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.bias
_lowerCAmelCase : Any = xmod_layer.self_attn_layer_norm.weight
_lowerCAmelCase : Optional[int] = xmod_layer.self_attn_layer_norm.bias
# intermediate
_lowerCAmelCase : List[str] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
_lowerCAmelCase : Any = xmod_layer.fca.weight
_lowerCAmelCase : List[str] = xmod_layer.fca.bias
# output
_lowerCAmelCase : int = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
_lowerCAmelCase : List[str] = xmod_layer.fca.weight
_lowerCAmelCase : Dict = xmod_layer.fca.bias
_lowerCAmelCase : Optional[Any] = xmod_layer.final_layer_norm.weight
_lowerCAmelCase : Dict = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
_lowerCAmelCase : Any = xmod_layer.adapter_layer_norm.weight
_lowerCAmelCase : List[Any] = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
_lowerCAmelCase : List[Any] = bert_output.adapter_modules[lang_code]
_lowerCAmelCase : List[Any] = xmod_layer.adapter_modules[lang_code]
_lowerCAmelCase : Dict = from_adapter.fca.weight
_lowerCAmelCase : Dict = from_adapter.fca.bias
_lowerCAmelCase : List[Any] = from_adapter.fca.weight
_lowerCAmelCase : Optional[int] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
_lowerCAmelCase : int = xmod_sent_encoder.layer_norm.weight
_lowerCAmelCase : str = xmod_sent_encoder.layer_norm.bias
if classification_head:
_lowerCAmelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight
_lowerCAmelCase : List[str] = xmod.model.classification_heads["mnli"].dense.bias
_lowerCAmelCase : int = xmod.model.classification_heads["mnli"].out_proj.weight
_lowerCAmelCase : Any = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
_lowerCAmelCase : Union[str, Any] = xmod.model.encoder.lm_head.dense.weight
_lowerCAmelCase : List[str] = xmod.model.encoder.lm_head.dense.bias
_lowerCAmelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.weight
_lowerCAmelCase : Optional[int] = xmod.model.encoder.lm_head.layer_norm.bias
_lowerCAmelCase : List[str] = xmod.model.encoder.lm_head.weight
_lowerCAmelCase : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowerCAmelCase : List[Any] = xmod.encode(_lowerCamelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCamelCase )
_lowerCAmelCase : List[Any] = model(_lowerCamelCase )[0]
if classification_head:
_lowerCAmelCase : Dict = xmod.model.classification_heads["mnli"](xmod.extract_features(_lowerCamelCase ) )
else:
_lowerCAmelCase : Any = xmod.model(_lowerCamelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
_lowerCAmelCase : int = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
_lowerCAmelCase : List[Any] = torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(_lowerCamelCase ).mkdir(parents=_lowerCamelCase , exist_ok=_lowerCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
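# Example invocation (script name and paths are hypothetical, shown for illustration only):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path ./xmod.base.81.1M.pt \
#       --pytorch_dump_folder_path ./xmod-base \
#       --classification_head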
| 658 |
import requests
from bs4 import BeautifulSoup
def A ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
_lowerCAmelCase : str = soup.findAll("h1" )
_lowerCAmelCase : Optional[int] = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
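# The returned mapping pairs scraped headings with counter strings, roughly:
# {"Coronavirus Cases:": "...", "Deaths:": "...", "Recovered:": "...", ...}
# (the exact keys and values depend on the live page and change daily).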
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 1 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    '''simple docstring'''
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" " , "" )
    second_str = second_str.replace(" " , "" )
    # Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int )
    # For each character in the input strings, increment the count for
    # first_str and decrement it for second_str
    for i in range(len(first_str ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values() )
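# Examples:
#   check_anagrams("Silent", "Listen")                      -> True
#   check_anagrams("This is a string", "Is this a string")  -> True
#   check_anagrams("There", "Their")                        -> False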
if __name__ == "__main__":
from doctest import testmod
testmod()
_snake_case = input("Enter the first string ").strip()
_snake_case = input("Enter the second string ").strip()
_snake_case = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
| 658 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__( self, degree: int, coefficients: MutableSequence[float]):
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__( self, polynomial_a):
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__( self, polynomial_a):
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0, [-1])
    def __neg__( self):
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__( self, polynomial_a):
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate( self, substitution):
        '''simple docstring'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self):
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__( self):
        '''simple docstring'''
        return self.__str__()
    def derivative( self):
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral( self, constant = 0):
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__( self, polynomial_a):
        '''simple docstring'''
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self, polynomial_a):
        '''simple docstring'''
        return not self.__eq__(polynomial_a)
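# A minimal usage sketch of the class above (coefficients are ordered from the
# constant term upward, so [1, 2, 3] encodes 3x^2 + 2x + 1):
# p = Polynomial(2, [1, 2, 3])
# print(p)               # 3x^2 + 2x + 1
# print(p.evaluate(2))   # 17
# print(p.derivative())  # 6x + 2
# print(p.integral())    # 1.0x^3 + 1.0x^2 + 1.0x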
| 658 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = AlbertTokenizer
lowerCamelCase__ = AlbertTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = True
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "this is a test"
_lowerCAmelCase : List[str] = "this is a test"
return input_text, output_text
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = "<pad>"
_lowerCAmelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a), __a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a), __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<pad>")
self.assertEqual(vocab_keys[1], "<unk>")
self.assertEqual(vocab_keys[-1], "▁eloquent")
self.assertEqual(len(__a), 3_0000)
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
def snake_case__ ( self):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Tuple = self.get_rust_tokenizer()
_lowerCAmelCase : Optional[int] = "I was born in 92000, and this is falsé."
_lowerCAmelCase : List[str] = tokenizer.tokenize(__a)
_lowerCAmelCase : Dict = rust_tokenizer.tokenize(__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : List[str] = tokenizer.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : Any = rust_tokenizer.encode(__a, add_special_tokens=__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : Tuple = self.get_rust_tokenizer()
_lowerCAmelCase : Tuple = tokenizer.encode(__a)
_lowerCAmelCase : List[str] = rust_tokenizer.encode(__a)
self.assertListEqual(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
_lowerCAmelCase : List[Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(__a, ["▁this", "▁is", "▁a", "▁test"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a), [48, 25, 21, 1289])
_lowerCAmelCase : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
__a, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
_lowerCAmelCase : Any = tokenizer.convert_tokens_to_ids(__a)
self.assertListEqual(__a, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
_lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(__a)
self.assertListEqual(
__a, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )
def snake_case__ ( self):
'''simple docstring'''
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
_lowerCAmelCase : Any = tokenizer.encode("sequence builders")
_lowerCAmelCase : Optional[Any] = tokenizer.encode("multi-sequence build")
_lowerCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__a)
_lowerCAmelCase : str = tokenizer.build_inputs_with_special_tokens(__a, __a)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e", )
| 658 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self, vocab_size=3_2000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1E-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.", FutureWarning, )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings( self):
        '''simple docstring'''
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self, value):
        '''simple docstring'''
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 1 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    '''simple docstring'''
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
    return requests.get(video_url ).content
if __name__ == "__main__":
_snake_case = input("Enter Video/IGTV url: ").strip()
_snake_case = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 658 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    '''simple docstring'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 1 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    '''simple docstring'''
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform(s: str) -> BWTTransformDict:
    '''simple docstring'''
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    if not s:
        raise ValueError("The parameter s must not be empty." )
    rotations = all_rotations(s )
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    '''simple docstring'''
    if not isinstance(bwt_string , str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(bwt_string ) if False else int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )
    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
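# Worked example:
#   bwt_transform("^BANANA")  -> {"bwt_string": "BNN^AAA", "idx_original_string": 6}
#   reverse_bwt("BNN^AAA", 6) -> "^BANANA"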
if __name__ == "__main__":
_snake_case = "Provide a string that I will generate its BWT transform: "
_snake_case = input(entry_msg).strip()
_snake_case = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result["bwt_string"]}\''''
)
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
f'''we get original string \'{original_string}\''''
)
| 658 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"
    def __init__( self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict( self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
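# A minimal usage sketch (assumes the transformers library is installed):
# from transformers import UperNetConfig, UperNetForSemanticSegmentation
# config = UperNetConfig()  # defaults to a ResNet backbone
# model = UperNetForSemanticSegmentation(config)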
| 658 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_snake_case = TypeVar("T")
class Node(Generic[T]):
    def __init__( self, data: T):
        '''simple docstring'''
        self.data = data
        self.next: Node[T] | None = None
    def __str__( self):
        '''simple docstring'''
        return f"{self.data}"
class Stack(Generic[T]):
    def __init__( self):
        '''simple docstring'''
        self.top: Node[T] | None = None
    def __iter__( self):
        '''simple docstring'''
        node = self.top
        while node:
            yield node.data
            node = node.next
def __str__( self):
'''simple docstring'''
return "->".join([str(__a) for item in self])
def __len__( self):
'''simple docstring'''
return len(tuple(iter(self)))
    def is_empty( self):
        '''simple docstring'''
        return self.top is None
    def push( self, item):
        '''simple docstring'''
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data
    def clear( self):
        '''simple docstring'''
        self.top = None
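# A minimal usage sketch of the stack above:
# stack: Stack[int] = Stack()
# stack.push(1)
# stack.push(2)
# assert stack.peek() == 2 and stack.pop() == 2 and len(stack) == 1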
if __name__ == "__main__":
from doctest import testmod
testmod()
| 658 |
import base64
def base85_encode(string: str) -> bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8" ) )
def base85_decode(a85encoded: bytes) -> str:
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
import csv
import tweepy
# Twitter API credentials
_snake_case = ""
_snake_case = ""
_snake_case = ""
_snake_case = ""
def get_all_tweets(screen_name: str) -> None:
    '''simple docstring'''
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"getting tweets before {oldest}" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"...{len(alltweets )} tweets downloaded so far" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"new_{screen_name}_tweets.csv" , "w" ) as f:
        writer = csv.writer(f )
        writer.writerow(["id", "created_at", "text"] )
        writer.writerows(outtweets )
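# NOTE: fill in the four credential strings above before running; the call below
# then writes every fetched tweet to new_<screen_name>_tweets.csv.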
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 658 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs( self):
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    def atol_for_validation( self):
        '''simple docstring'''
        return 1E-4
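# A minimal usage sketch (assumes the transformers library is installed):
# from transformers import Data2VecVisionConfig, Data2VecVisionModel
# model = Data2VecVisionModel(Data2VecVisionConfig())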
| 658 | 1 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, **__a):
'''simple docstring'''
requires_backends(self, ["bs4"])
super().__init__(**__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = []
_lowerCAmelCase : int = []
_lowerCAmelCase : Optional[Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
_lowerCAmelCase : Union[str, Any] = parent.find_all(child.name, recursive=__a)
xpath_tags.append(child.name)
xpath_subscripts.append(
0 if 1 == len(__a) else next(i for i, s in enumerate(__a, 1) if s is child))
_lowerCAmelCase : Union[str, Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeautifulSoup(__a, "html.parser")
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : List[Any] = []
for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
continue
_lowerCAmelCase : Dict = html.unescape(__a).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__a)
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.xpath_soup(__a)
stringaxtag_seq.append(__a)
stringaxsubs_seq.append(__a)
if len(__a) != len(__a):
raise ValueError("Number of doc strings and xtags does not correspond")
if len(__a) != len(__a):
raise ValueError("Number of doc strings and xsubs does not correspond")
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = ""
for tagname, subs in zip(__a, __a):
xpath += f"/{tagname}"
if subs != 0:
xpath += f"[{subs}]"
return xpath
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = False
# Check that strings has a valid type
if isinstance(__a, __a):
_lowerCAmelCase : Dict = True
elif isinstance(__a, (list, tuple)):
if len(__a) == 0 or isinstance(html_strings[0], __a):
_lowerCAmelCase : str = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
f"but is of type {type(__a)}.")
_lowerCAmelCase : Optional[Any] = bool(isinstance(__a, (list, tuple)) and (isinstance(html_strings[0], __a)))
if not is_batched:
_lowerCAmelCase : Tuple = [html_strings]
# Get nodes + xpaths
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Union[str, Any] = []
for html_string in html_strings:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.get_three_from_single(__a)
nodes.append(__a)
_lowerCAmelCase : int = []
for node, tag_list, sub_list in zip(__a, __a, __a):
_lowerCAmelCase : Optional[Any] = self.construct_xpath(__a, __a)
xpath_strings.append(__a)
xpaths.append(__a)
# return as Dict
_lowerCAmelCase : int = {"nodes": nodes, "xpaths": xpaths}
_lowerCAmelCase : Optional[Any] = BatchFeature(data=__a, tensor_type=__a)
return encoded_inputs
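# A minimal usage sketch of the feature extractor above (hypothetical HTML input;
# transformers publishes this class as MarkupLMFeatureExtractor, anonymized here):
# fe = MarkupLMFeatureExtractor()
# encoding = fe("<html><body><p>Hello world</p></body></html>")
# encoding["nodes"]   -> [["Hello world"]]
# encoding["xpaths"]  -> [["/html/body/p"]]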
| 658 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    '''simple docstring'''
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {os.path.abspath(pytorch_config_dump_path )}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
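# Example invocation (script name and paths are hypothetical):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-24_H-1024_A-16/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-large-cased \
#       --finetuning_task sts-b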
| 658 | 1 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = inspect.getfile(accelerate.test_utils)
lowerCamelCase__ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_cli.py'])
lowerCamelCase__ = ['accelerate', 'launch']
lowerCamelCase__ = Path.home() / '.cache/huggingface/accelerate'
lowerCamelCase__ = 'default_config.yaml'
lowerCamelCase__ = config_folder / config_file
lowerCamelCase__ = config_folder / '_default_config.yaml'
lowerCamelCase__ = Path('tests/test_configs')
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path)
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path)
def snake_case__ ( self):
'''simple docstring'''
        cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())
def snake_case__ ( self):
'''simple docstring'''
for config in sorted(self.test_config_path.glob("**/*.yaml")):
with self.subTest(config_file=__a):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(__a), self.test_file_path], env=os.environ.copy())
def snake_case__ ( self):
'''simple docstring'''
execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = 'test-tpu'
lowerCamelCase__ = 'us-central1-a'
lowerCamelCase__ = 'ls'
lowerCamelCase__ = ['accelerate', 'tpu-config']
lowerCamelCase__ = 'cd /usr/share'
lowerCamelCase__ = 'tests/test_samples/test_command_file.sh'
lowerCamelCase__ = 'Running gcloud compute tpus tpu-vm ssh'
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=__a)
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all", __a, )
| 658 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
    def _info( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
    def _compute( self, predictions, references, max_order=4, smooth=False):
        '''simple docstring'''
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 658 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( a):
lowerCamelCase__ = (CMStochasticIterativeScheduler,)
lowerCamelCase__ = 10
def snake_case__ ( self, **__a):
'''simple docstring'''
_lowerCAmelCase : Any = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**__a)
return config
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 10
_lowerCAmelCase : List[str] = self.get_scheduler_config()
_lowerCAmelCase : Any = self.scheduler_classes[0](**__a)
scheduler.set_timesteps(__a)
_lowerCAmelCase : Tuple = scheduler.timesteps[0]
_lowerCAmelCase : Tuple = scheduler.timesteps[1]
_lowerCAmelCase : Tuple = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : Optional[int] = scheduler.step(__a, __a, __a).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(__a, __a, __a).prev_sample
self.assertEqual(output_a.shape, sample.shape)
self.assertEqual(output_a.shape, output_a.shape)
def snake_case__ ( self):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__a)
def snake_case__ ( self):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[Any] = scheduler_class(**__a)
_lowerCAmelCase : List[str] = 1
scheduler.set_timesteps(__a)
_lowerCAmelCase : Dict = scheduler.timesteps
_lowerCAmelCase : List[str] = torch.manual_seed(0)
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(__a):
# 1. scale model input
_lowerCAmelCase : Any = scheduler.scale_model_input(__a, __a)
# 2. predict noise residual
_lowerCAmelCase : Optional[Any] = model(__a, __a)
# 3. predict previous sample x_t-1
_lowerCAmelCase : Optional[int] = scheduler.step(__a, __a, __a, generator=__a).prev_sample
_lowerCAmelCase : Any = pred_prev_sample
_lowerCAmelCase : Dict = torch.sum(torch.abs(__a))
_lowerCAmelCase : List[str] = torch.mean(torch.abs(__a))
assert abs(result_sum.item() - 192.7_614) < 1E-2
assert abs(result_mean.item() - 0.2_510) < 1E-3
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
_lowerCAmelCase : List[Any] = scheduler_class(**__a)
_lowerCAmelCase : List[str] = [106, 0]
scheduler.set_timesteps(timesteps=__a)
_lowerCAmelCase : str = scheduler.timesteps
_lowerCAmelCase : Tuple = torch.manual_seed(0)
_lowerCAmelCase : Optional[Any] = self.dummy_model()
_lowerCAmelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_lowerCAmelCase : Any = scheduler.scale_model_input(__a, __a)
# 2. predict noise residual
_lowerCAmelCase : Union[str, Any] = model(__a, __a)
# 3. predict previous sample x_t-1
_lowerCAmelCase : List[Any] = scheduler.step(__a, __a, __a, generator=__a).prev_sample
_lowerCAmelCase : int = pred_prev_sample
_lowerCAmelCase : List[str] = torch.sum(torch.abs(__a))
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(__a))
assert abs(result_sum.item() - 347.6_357) < 1E-2
assert abs(result_mean.item() - 0.4_527) < 1E-3
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**__a)
_lowerCAmelCase : Tuple = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : Tuple = scheduler_class(**__a)
_lowerCAmelCase : Any = [39, 30, 12, 1, 0]
_lowerCAmelCase : Dict = len(__a)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
scheduler.set_timesteps(num_inference_steps=__a, timesteps=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : List[Any] = self.get_scheduler_config()
_lowerCAmelCase : Optional[Any] = scheduler_class(**__a)
_lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}", ):
scheduler.set_timesteps(timesteps=__a)
| 658 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path , display=False ):
    '''simple docstring'''
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan(device , conf_path=None , ckpt_path=None ):
    '''simple docstring'''
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan(x , model ):
    '''simple docstring'''
    z, _, [_, _, indices] = model.encode(x )
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str(string , reload=False ):
    '''simple docstring'''
    module, cls = string.rsplit("." , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config(config ):
    '''simple docstring'''
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate." )
    return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def load_model_from_config(config , sd , gpu=True , eval_mode=True ):
    '''simple docstring'''
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config , ckpt , gpu , eval_mode ):
    '''simple docstring'''
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}." )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=gpu , eval_mode=eval_mode )["model"]
    return model, global_step
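# A minimal usage sketch (hypothetical config/checkpoint paths):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# vqgan = load_vqgan(device, conf_path="./model_checkpoints/vqgan_only.yaml",
#                    ckpt_path="./model_checkpoints/vqgan_only.pt")
# xrec = reconstruct_with_vqgan(x.to(device), vqgan)  # x: a batch of image tensors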
| 658 | 1 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__( self, label_idx=-1):
        '''simple docstring'''
        self.label_idx = label_idx
def snake_case__ ( self, __a, __a):
'''simple docstring'''
if isinstance(A__, A__):
_lowerCAmelCase : Optional[Any] = mode.value
_lowerCAmelCase : List[Any] = os.path.join(A__, f"{mode}.txt")
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Optional[Any] = []
with open(A__, encoding="utf-8") as f:
_lowerCAmelCase : str = []
_lowerCAmelCase : Optional[int] = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=A__, labels=A__))
guid_index += 1
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[int] = []
else:
_lowerCAmelCase : List[str] = line.split(" ")
words.append(splits[0])
if len(A__) > 1:
labels.append(splits[self.label_idx].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=A__, labels=A__))
return examples
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(A__)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_lowerCAmelCase : Tuple = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
writer.write(A__)
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
def snake_case__ ( self, __a):
'''simple docstring'''
if path:
with open(A__, "r") as f:
_lowerCAmelCase : Any = f.read().splitlines()
if "O" not in labels:
_lowerCAmelCase : Any = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__( self):
        '''simple docstring'''
        super().__init__(label_idx=-2)
def snake_case__ ( self, __a):
'''simple docstring'''
if path:
with open(A__, "r") as f:
_lowerCAmelCase : str = f.read().splitlines()
if "O" not in labels:
_lowerCAmelCase : Union[str, Any] = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
def snake_case__ ( self, __a, __a):
'''simple docstring'''
if isinstance(A__, A__):
_lowerCAmelCase : Optional[int] = mode.value
_lowerCAmelCase : List[str] = os.path.join(A__, f"{mode}.txt")
_lowerCAmelCase : List[Any] = 1
_lowerCAmelCase : Optional[int] = []
with open(A__, encoding="utf-8") as f:
for sentence in parse_incr(A__):
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Dict = []
for token in sentence:
words.append(token["form"])
labels.append(token["upos"])
assert len(A__) == len(A__)
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=A__, labels=A__))
guid_index += 1
return examples
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = 0
for sentence in parse_incr(A__):
_lowerCAmelCase : str = preds_list[example_id]
_lowerCAmelCase : Union[str, Any] = ""
for token in sentence:
out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
out += "\n"
writer.write(A__)
example_id += 1
def snake_case__ ( self, __a):
'''simple docstring'''
if path:
with open(A__, "r") as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 700 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
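# A hedged usage sketch in terms of the upstream `transformers` name this class
# corresponds to (RoCBertConfig); the small sizes are illustrative only.
def roc_bert_config_demo():
    from transformers import RoCBertConfig

    config = RoCBertConfig(vocab_size=1_000, hidden_size=128, num_hidden_layers=2)
    assert config.model_type == "roc_bert"
    assert config.enable_shape and config.enable_pronunciation
    return config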
| 658 | 0 |
import string
def A ( _lowerCamelCase ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
_lowerCAmelCase : Optional[Any] = ""
for symbol in message:
if symbol in string.ascii_uppercase:
_lowerCAmelCase : str = string.ascii_uppercase.find(__A )
_lowerCAmelCase : Any = num - key
if num < 0:
_lowerCAmelCase : int = num + len(string.ascii_uppercase )
_lowerCAmelCase : Tuple = translated + string.ascii_uppercase[num]
else:
_lowerCAmelCase : int = translated + symbol
print(F"Decryption using Key #{key}: {translated}" )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = input("Encrypted message: " )
_lowerCAmelCase : Tuple = message.upper()
decrypt(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
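# A minimal sketch of decrypting with a single known key, i.e. the operation
# the brute-force loop above repeats for every possible shift.
def decrypt_with_key(ciphertext: str, key: int) -> str:
    alphabet = string.ascii_uppercase
    translated = []
    for symbol in ciphertext.upper():
        if symbol in alphabet:
            translated.append(alphabet[(alphabet.index(symbol) - key) % len(alphabet)])
        else:
            translated.append(symbol)
    return "".join(translated)
# decrypt_with_key("KHOOR", 3) == "HELLO"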
| 701 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
import requests
from bs4 import BeautifulSoup
def A ( _lowerCamelCase = "AAPL" ):
'''simple docstring'''
_lowerCAmelCase : Dict = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
_lowerCAmelCase : str = BeautifulSoup(requests.get(_SCREAMING_SNAKE_CASE ).text , "html.parser" )
_lowerCAmelCase : str = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
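# The CSS class queried above is tied to Yahoo Finance's current markup and can
# break without notice. A hedged, more defensive variant that returns None
# instead of raising when the selector no longer matches:
def stock_price_or_none(symbol: str = "AAPL"):
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    container = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = container.find("span") if container is not None else None
    return span.text if span is not None else None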
| 702 |
def A ( _lowerCamelCase ):
'''simple docstring'''
if length <= 0 or not isinstance(_lowerCamelCase , int ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
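# Worked check of the closed form h(n) = n * (2 * n - 1) used above: counting
# from n = 0, the first five hexagonal numbers are 0, 1, 6, 15, 28, and the
# last entry is 4 * (2 * 4 - 1) = 28.
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]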
| 658 | 0 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_snake_case = "scheduler_config.json"
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE):
lowerCamelCase__ = 1
lowerCamelCase__ = 2
lowerCamelCase__ = 3
lowerCamelCase__ = 4
lowerCamelCase__ = 5
@dataclass
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE):
lowerCamelCase__ = 42
class UpperCAmelCase_ :
lowerCamelCase__ = SCHEDULER_CONFIG_NAME
lowerCamelCase__ = ["dtype"]
lowerCamelCase__ = []
lowerCamelCase__ = True
@classmethod
def snake_case__ ( cls, __a = None, __a = None, __a=False, **__a, ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = cls.load_config(
pretrained_model_name_or_path=A_, subfolder=A_, return_unused_kwargs=A_, **A_, )
_lowerCAmelCase , _lowerCAmelCase : Dict = cls.from_config(A_, return_unused_kwargs=A_, **A_)
if hasattr(A_, "create_state") and getattr(A_, "has_state", A_):
_lowerCAmelCase : int = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def snake_case__ ( self, __a, __a = False, **__a):
'''simple docstring'''
self.save_config(save_directory=A_, push_to_hub=A_, **A_)
@property
def snake_case__ ( self):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
_lowerCAmelCase : int = list(set([cls.__name__] + cls._compatibles))
_lowerCAmelCase : Optional[Any] = importlib.import_module(__name__.split(".")[0])
_lowerCAmelCase : int = [
getattr(A_, A_) for c in compatible_classes_str if hasattr(A_, A_)
]
return compatible_classes
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert len(_lowerCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_lowerCamelCase ) - x.ndim) ) , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase=0.9_99 , _lowerCamelCase=jnp.float32 ):
'''simple docstring'''
def alpha_bar(_lowerCamelCase ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
_lowerCAmelCase : Optional[Any] = []
for i in range(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = i / num_diffusion_timesteps
_lowerCAmelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_lowerCamelCase ) / alpha_bar(_lowerCamelCase ) , _lowerCamelCase ) )
return jnp.array(_lowerCamelCase , dtype=_lowerCamelCase )
@flax.struct.dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
@classmethod
def snake_case__ ( cls, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = scheduler.config
if config.trained_betas is not None:
_lowerCAmelCase : Union[str, Any] = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
elif config.beta_schedule == "linear":
_lowerCAmelCase : List[Any] = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCAmelCase : List[Any] = (
jnp.linspace(
config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCAmelCase : List[str] = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
else:
raise NotImplementedError(
f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}")
_lowerCAmelCase : Union[str, Any] = 1.0 - betas
_lowerCAmelCase : Dict = jnp.cumprod(A_, axis=0)
return cls(
alphas=A_, betas=A_, alphas_cumprod=A_, )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = state.alphas_cumprod
_lowerCAmelCase : Any = alphas_cumprod[timesteps] ** 0.5
_lowerCAmelCase : List[Any] = sqrt_alpha_prod.flatten()
_lowerCAmelCase : Optional[Any] = broadcast_to_shape_from_left(_lowerCamelCase , original_samples.shape )
_lowerCAmelCase : Dict = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowerCAmelCase : str = sqrt_one_minus_alpha_prod.flatten()
_lowerCAmelCase : Optional[Any] = broadcast_to_shape_from_left(_lowerCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : str = get_sqrt_alpha_prod(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = get_sqrt_alpha_prod(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
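# A hedged sanity check of the helpers above: build a squaredcos_cap_v2
# ("Glide cosine") beta schedule and confirm the broadcast helper pads trailing
# axes so per-timestep scalars can multiply batched samples elementwise.
def scheduler_helpers_demo():
    betas = betas_for_alpha_bar(1000)  # defaults to max_beta=0.999
    assert betas.shape == (1000,) and float(betas.max()) <= 0.999
    scalars = jnp.ones((4,))  # one value per batch element
    padded = broadcast_to_shape_from_left(scalars, (4, 8, 8, 3))
    assert padded.shape == (4, 8, 8, 3)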
| 703 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( _lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
# Centralize the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if features.any():
_lowerCAmelCase : List[Any] = features.mean(1 )
# Center the dataset
_lowerCAmelCase : List[Any] = features - np.reshape(_lowerCamelCase , (data_mean.size, 1) )
_lowerCAmelCase : Optional[Any] = np.dot(_lowerCamelCase , centered_data.T ) / features.shape[1]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = np.linalg.eigh(_lowerCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCAmelCase : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCAmelCase : List[Any] = np.dot(filtered_eigenvectors.T , _lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any():
_lowerCAmelCase , _lowerCAmelCase : List[str] = eigh(
covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
_lowerCAmelCase : List[str] = eigenvectors[:, ::-1][:, :dimensions]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = np.linalg.svd(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = svd_matrix[:, 0:dimensions]
_lowerCAmelCase : str = np.dot(filtered_svd_matrix.T , _lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Union[str, Any] = linear_discriminant_analysis(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Tuple = principal_component_analysis(_lowerCamelCase , _lowerCamelCase )
if not np.allclose(_lowerCamelCase , _lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
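# A hedged end-to-end sketch: rows are features and columns are samples, as in
# the routines above, so projecting a 3-feature toy set onto its top two
# principal components yields a (2, n_samples) array.
def pca_demo():
    features = np.array(
        [[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [1.0, 1.0, 2.0, 2.0]]
    )
    projected = principal_component_analysis(features, 2)
    assert projected.shape == (2, features.shape[1])
    return projected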
| 658 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def A ( _lowerCamelCase ):
'''simple docstring'''
if isinstance(_lowercase , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class UpperCAmelCase_ :
def snake_case__ ( self, __a, __a):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = np.abs((a - b)).max()
self.assertLessEqual(UpperCamelCase_, UpperCamelCase_, f"Difference between torch and flax is {diff} (>= {tol}).")
def snake_case__ ( self, __a, __a, __a, __a, __a=None, **__a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase_, UpperCamelCase_)
_lowerCAmelCase : Optional[int] = FlaxVisionTextDualEncoderModel(UpperCamelCase_)
_lowerCAmelCase : int = model(input_ids=UpperCamelCase_, pixel_values=UpperCamelCase_, attention_mask=UpperCamelCase_)
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
def snake_case__ ( self, __a, __a, __a, __a, __a=None, **__a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_vision_text_model(UpperCamelCase_, UpperCamelCase_)
_lowerCAmelCase : List[Any] = {"vision_model": vision_model, "text_model": text_model}
_lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase_)
_lowerCAmelCase : Tuple = model(input_ids=UpperCamelCase_, pixel_values=UpperCamelCase_, attention_mask=UpperCamelCase_)
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
def snake_case__ ( self, __a, __a, __a, __a, __a=None, **__a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_vision_text_model(UpperCamelCase_, UpperCamelCase_)
_lowerCAmelCase : List[Any] = {"vision_model": vision_model, "text_model": text_model}
_lowerCAmelCase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase_)
_lowerCAmelCase : Any = model(input_ids=UpperCamelCase_, pixel_values=UpperCamelCase_, attention_mask=UpperCamelCase_)
_lowerCAmelCase : List[Any] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_)
_lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase_)
_lowerCAmelCase : Union[str, Any] = model(input_ids=UpperCamelCase_, pixel_values=UpperCamelCase_, attention_mask=UpperCamelCase_)
_lowerCAmelCase : Union[str, Any] = after_output[0]
_lowerCAmelCase : List[str] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCamelCase_, 1E-3)
def snake_case__ ( self, __a, __a, __a, __a, __a=None, **__a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_vision_text_model(UpperCamelCase_, UpperCamelCase_)
_lowerCAmelCase : Optional[int] = {"vision_model": vision_model, "text_model": text_model}
_lowerCAmelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase_)
_lowerCAmelCase : Tuple = model(
input_ids=UpperCamelCase_, pixel_values=UpperCamelCase_, attention_mask=UpperCamelCase_, output_attentions=UpperCamelCase_)
_lowerCAmelCase : Any = output.vision_model_output.attentions
self.assertEqual(len(UpperCamelCase_), vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : str = to_atuple(vision_model.config.image_size)
_lowerCAmelCase : Dict = to_atuple(vision_model.config.patch_size)
_lowerCAmelCase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCAmelCase : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
_lowerCAmelCase : int = output.text_model_output.attentions
self.assertEqual(len(UpperCamelCase_), text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
pt_model.to(UpperCamelCase_)
pt_model.eval()
# prepare inputs
_lowerCAmelCase : List[str] = inputs_dict
_lowerCAmelCase : Any = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
_lowerCAmelCase : Tuple = pt_model(**UpperCamelCase_).to_tuple()
_lowerCAmelCase : str = fx_model(**UpperCamelCase_).to_tuple()
self.assertEqual(len(UpperCamelCase_), len(UpperCamelCase_), "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
self.assert_almost_equals(UpperCamelCase_, pt_output.numpy(), 4E-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCamelCase_)
_lowerCAmelCase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase_, from_pt=UpperCamelCase_)
_lowerCAmelCase : Union[str, Any] = fx_model_loaded(**UpperCamelCase_).to_tuple()
self.assertEqual(len(UpperCamelCase_), len(UpperCamelCase_), "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
self.assert_almost_equals(UpperCamelCase_, pt_output.numpy(), 4E-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCamelCase_)
_lowerCAmelCase : Optional[int] = VisionTextDualEncoderModel.from_pretrained(UpperCamelCase_, from_flax=UpperCamelCase_)
pt_model_loaded.to(UpperCamelCase_)
pt_model_loaded.eval()
with torch.no_grad():
_lowerCAmelCase : List[str] = pt_model_loaded(**UpperCamelCase_).to_tuple()
self.assertEqual(len(UpperCamelCase_), len(UpperCamelCase_), "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
self.assert_almost_equals(UpperCamelCase_, pt_output_loaded.numpy(), 4E-2)
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase_, UpperCamelCase_)
_lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderModel(UpperCamelCase_)
_lowerCAmelCase : Optional[int] = FlaxVisionTextDualEncoderModel(UpperCamelCase_)
_lowerCAmelCase : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), UpperCamelCase_)
_lowerCAmelCase : List[Any] = fx_state
self.check_pt_flax_equivalence(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase_, UpperCamelCase_)
_lowerCAmelCase : List[str] = VisionTextDualEncoderModel(UpperCamelCase_)
_lowerCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel(UpperCamelCase_)
_lowerCAmelCase : Tuple = load_flax_weights_in_pytorch_model(UpperCamelCase_, fx_model.params)
self.check_pt_flax_equivalence(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**UpperCamelCase_)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**UpperCamelCase_)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_save_load(**UpperCamelCase_)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**UpperCamelCase_)
@is_pt_flax_cross_test
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase : List[Any] = config_inputs_dict.pop("vision_config")
_lowerCAmelCase : List[str] = config_inputs_dict.pop("text_config")
_lowerCAmelCase : List[Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
self.check_equivalence_flax_to_pt(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_pretrained_model_and_inputs()
_lowerCAmelCase : int = model_a(**UpperCamelCase_)
_lowerCAmelCase : int = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(UpperCamelCase_)
_lowerCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase_)
_lowerCAmelCase : int = model_a(**UpperCamelCase_)
_lowerCAmelCase : Any = after_outputs[0]
_lowerCAmelCase : List[str] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCamelCase_, 1E-5)
@require_flax
class UpperCAmelCase_ ( a , unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert", vision_from_pt=UpperCamelCase_, text_from_pt=UpperCamelCase_, )
_lowerCAmelCase : Union[str, Any] = 13
_lowerCAmelCase : int = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
_lowerCAmelCase : Union[str, Any] = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
_lowerCAmelCase : Tuple = random_attention_mask([batch_size, 4])
_lowerCAmelCase : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = FlaxViTModel(UpperCamelCase_)
_lowerCAmelCase : Optional[Any] = FlaxBertModel(UpperCamelCase_)
return vision_model, text_model
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = FlaxViTModelTester(self)
_lowerCAmelCase : Optional[int] = FlaxBertModelTester(self)
_lowerCAmelCase : List[Any] = vit_model_tester.prepare_config_and_inputs()
_lowerCAmelCase : Any = bert_model_tester.prepare_config_and_inputs()
_lowerCAmelCase : Optional[Any] = vision_config_and_inputs
_lowerCAmelCase : Any = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class UpperCAmelCase_ ( a , unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=UpperCamelCase_, text_from_pt=UpperCamelCase_, )
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : List[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
_lowerCAmelCase : Union[str, Any] = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
_lowerCAmelCase : Union[str, Any] = random_attention_mask([batch_size, 4])
_lowerCAmelCase : Union[str, Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = FlaxCLIPVisionModel(UpperCamelCase_)
_lowerCAmelCase : str = FlaxBertModel(UpperCamelCase_)
return vision_model, text_model
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = FlaxCLIPVisionModelTester(self)
_lowerCAmelCase : Optional[Any] = FlaxBertModelTester(self)
_lowerCAmelCase : Any = clip_model_tester.prepare_config_and_inputs()
_lowerCAmelCase : str = bert_model_tester.prepare_config_and_inputs()
_lowerCAmelCase : Tuple = vision_config_and_inputs
_lowerCAmelCase : Tuple = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
_lowerCAmelCase : int = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
_lowerCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
_lowerCAmelCase : Union[str, Any] = processor(
text=["una foto di un gatto", "una foto di un cane"], images=UpperCamelCase_, padding=UpperCamelCase_, return_tensors="np")
_lowerCAmelCase : Dict = model(**UpperCamelCase_)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
_lowerCAmelCase : Union[str, Any] = np.array([[1.2_284_727, 0.3_104_122]])
self.assertTrue(np.allclose(outputs.logits_per_image, UpperCamelCase_, atol=1E-3))
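# A hedged sketch of what the `logits_per_image` assertions above encode: a
# dual encoder scores every image against every text via a scaled dot product
# of L2-normalised embeddings, giving an (n_images, n_texts) logits matrix.
def similarity_logits(image_embeds, text_embeds, logit_scale=1.0):
    image_embeds = image_embeds / np.linalg.norm(image_embeds, axis=-1, keepdims=True)
    text_embeds = text_embeds / np.linalg.norm(text_embeds, axis=-1, keepdims=True)
    return logit_scale * image_embeds @ text_embeds.T
# similarity_logits(np.ones((1, 4)), np.ones((2, 4))).shape == (1, 2)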
| 704 |
import requests
from bsa import BeautifulSoup
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , "html.parser" )
_lowerCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_lowerCAmelCase : str = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCAmelCase_ ( lowercase__):
lowerCamelCase__ = 'sew-d'
def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a=2, __a=512, __a=256, __a=True, __a=True, __a=("p2c", "c2p"), __a="layer_norm", __a="gelu_python", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.1, __a=0.02, __a=1E-7, __a=1E-5, __a="group", __a="gelu", __a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), __a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), __a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), __a=False, __a=128, __a=16, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a="mean", __a=False, __a=False, __a=256, __a=0, __a=1, __a=2, **__a, ):
'''simple docstring'''
super().__init__(**__lowerCamelCase, pad_token_id=__lowerCamelCase, bos_token_id=__lowerCamelCase, eos_token_id=__lowerCamelCase)
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : Union[str, Any] = feat_extract_norm
_lowerCAmelCase : List[str] = feat_extract_activation
_lowerCAmelCase : Any = list(__lowerCamelCase)
_lowerCAmelCase : Union[str, Any] = list(__lowerCamelCase)
_lowerCAmelCase : int = list(__lowerCamelCase)
_lowerCAmelCase : List[str] = conv_bias
_lowerCAmelCase : Union[str, Any] = num_conv_pos_embeddings
_lowerCAmelCase : int = num_conv_pos_embedding_groups
_lowerCAmelCase : Union[str, Any] = len(self.conv_dim)
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Optional[int] = squeeze_factor
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = position_buckets
_lowerCAmelCase : List[str] = share_att_key
_lowerCAmelCase : int = relative_attention
_lowerCAmelCase : Dict = norm_rel_ebd
_lowerCAmelCase : Optional[int] = list(__lowerCamelCase)
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : Any = hidden_dropout
_lowerCAmelCase : List[Any] = attention_dropout
_lowerCAmelCase : Any = activation_dropout
_lowerCAmelCase : List[str] = feat_proj_dropout
_lowerCAmelCase : int = final_dropout
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : List[Any] = feature_layer_norm_eps
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : List[str] = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : List[str] = apply_spec_augment
_lowerCAmelCase : Optional[int] = mask_time_prob
_lowerCAmelCase : Optional[Any] = mask_time_length
_lowerCAmelCase : Union[str, Any] = mask_time_min_masks
_lowerCAmelCase : List[Any] = mask_feature_prob
_lowerCAmelCase : Optional[Any] = mask_feature_length
_lowerCAmelCase : Any = mask_feature_min_masks
# ctc loss
_lowerCAmelCase : Dict = ctc_loss_reduction
_lowerCAmelCase : List[Any] = ctc_zero_infinity
# sequence classification
_lowerCAmelCase : Optional[Any] = use_weighted_layer_sum
_lowerCAmelCase : Union[str, Any] = classifier_proj_size
@property
def snake_case__ ( self):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1)
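# A hedged illustration of the property above: the feature extractor's overall
# downsampling factor is the product of the conv strides, so the default stride
# tuple (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) maps 320 input samples to one
# output frame (5 * 2**6 = 320).
assert functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320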
| 705 |
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Tuple = counter
if counter > pre_counter:
_lowerCAmelCase : Union[str, Any] = inputa
_lowerCAmelCase : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
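# A hedged recursive restatement of the memoisation used above: the chain
# length of n is one more than the chain length of its Collatz successor, and
# caching those lengths is what keeps the million-number sweep tractable.
from functools import lru_cache

@lru_cache(maxsize=None)
def collatz_chain_length(n: int) -> int:
    if n == 1:
        return 1
    return 1 + collatz_chain_length(n // 2 if n % 2 == 0 else 3 * n + 1)
# collatz_chain_length(13) == 10  (13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1)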
| 658 | 0 |
'''simple docstring'''
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Any = 0
_lowerCAmelCase : int = {}
def snake_case__ ( self, __a):
'''simple docstring'''
if vertex not in self.adjacency:
_lowerCAmelCase : Tuple = {}
self.num_vertices += 1
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
self.add_vertex(UpperCAmelCase_)
self.add_vertex(UpperCAmelCase_)
if head == tail:
return
_lowerCAmelCase : Tuple = weight
_lowerCAmelCase : Optional[int] = weight
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_edges()
for edge in edges:
_lowerCAmelCase : str = edge
edges.remove((tail, head, weight))
for i in range(len(UpperCAmelCase_)):
_lowerCAmelCase : Any = list(edges[i])
edges.sort(key=lambda e: e[2])
for i in range(len(UpperCAmelCase_) - 1):
if edges[i][2] >= edges[i + 1][2]:
_lowerCAmelCase : Optional[int] = edges[i][2] + 1
for edge in edges:
_lowerCAmelCase : List[Any] = edge
_lowerCAmelCase : Dict = weight
_lowerCAmelCase : List[Any] = weight
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCAmelCase : Union[str, Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def snake_case__ ( self):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def snake_case__ ( __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Tuple = Graph()
if vertices is None:
_lowerCAmelCase : Tuple = []
if edges is None:
_lowerCAmelCase : List[str] = []
for vertex in vertices:
g.add_vertex(UpperCAmelCase_)
for edge in edges:
g.add_edge(*UpperCAmelCase_)
return g
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : int = {}
_lowerCAmelCase : List[str] = {}
def __len__( self):
'''simple docstring'''
return len(self.parent)
def snake_case__ ( self, __a):
'''simple docstring'''
if item in self.parent:
return self.find(UpperCAmelCase_)
_lowerCAmelCase : List[Any] = item
_lowerCAmelCase : Dict = 0
return item
def snake_case__ ( self, __a):
'''simple docstring'''
if item not in self.parent:
return self.make_set(UpperCAmelCase_)
if item != self.parent[item]:
_lowerCAmelCase : List[str] = self.find(self.parent[item])
return self.parent[item]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.find(UpperCAmelCase_)
_lowerCAmelCase : Tuple = self.find(UpperCAmelCase_)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_lowerCAmelCase : Union[str, Any] = roota
return roota
if self.rank[roota] < self.rank[roota]:
_lowerCAmelCase : Any = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_lowerCAmelCase : Dict = roota
return roota
return None
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = graph.num_vertices
_lowerCAmelCase : Tuple = Graph.UnionFind()
_lowerCAmelCase : Optional[int] = []
while num_components > 1:
_lowerCAmelCase : Tuple = {}
for vertex in graph.get_vertices():
_lowerCAmelCase : Optional[Any] = -1
_lowerCAmelCase : int = graph.get_edges()
for edge in edges:
_lowerCAmelCase : str = edge
edges.remove((tail, head, weight))
for edge in edges:
_lowerCAmelCase : Optional[Any] = edge
_lowerCAmelCase : Union[str, Any] = union_find.find(UpperCAmelCase_)
_lowerCAmelCase : List[Any] = union_find.find(UpperCAmelCase_)
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Optional[Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowerCAmelCase : Union[str, Any] = cheap_edge[vertex]
if union_find.find(UpperCAmelCase_) != union_find.find(UpperCAmelCase_):
union_find.union(UpperCAmelCase_, UpperCAmelCase_)
mst_edges.append(cheap_edge[vertex])
_lowerCAmelCase : int = num_components - 1
_lowerCAmelCase : Dict = Graph.build(edges=UpperCAmelCase_)
return mst
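# A hedged, self-contained illustration of the union-find primitive that
# drives the Borůvka solver above, shown here in a Kruskal-style pass (the
# names and the toy graph are illustrative): components merge until a single
# tree spans the graph.
def tiny_union_find_mst():
    parent = {v: v for v in range(4)}

    def find(v):
        while parent[v] != v:
            parent[v] = parent[parent[v]]  # path halving
            v = parent[v]
        return v

    def union(a, b):
        root_a, root_b = find(a), find(b)
        if root_a != root_b:
            parent[root_b] = root_a
            return True
        return False

    edges = sorted([(1, 0, 1), (2, 0, 2), (2, 1, 3), (4, 2, 3)])  # (weight, u, v)
    return [(u, v, w) for w, u, v in edges if union(u, v)]
# tiny_union_find_mst() == [(0, 1, 1), (0, 2, 2), (1, 3, 2)], total weight 5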
| 706 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle mismatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
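# A hedged miniature of the regex-driven renaming above: match one legacy
# checkpoint key, recompute the block index from its captured groups, and emit
# the new key. The pattern is the re_encoder_block_conv_in expression from
# fix_jukebox_keys; the sample key is illustrative.
def rename_demo():
    import re

    pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    groups = pattern.fullmatch("encoders.0.level_blocks.1.model.2.3.weight").groups()
    block_index = int(groups[2]) * 2 + int(groups[3])  # 2 * 2 + 3 == 7
    return f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
# rename_demo() == "encoders.0.level_blocks.1.downsample_block.7.weight"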
| 658 | 0 |
from math import factorial, pi
def A ( _lowerCamelCase , _lowerCamelCase = 30 ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , (int, float) ):
raise ValueError("maclaurin_sin() requires either an int or float for theta" )
if not isinstance(_lowerCamelCase , int ) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
_lowerCAmelCase : Dict = float(_lowerCamelCase )
_lowerCAmelCase : Dict = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(_lowerCamelCase ) )
def A ( _lowerCamelCase , _lowerCamelCase = 30 ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , (int, float) ):
raise ValueError("maclaurin_cos() requires either an int or float for theta" )
if not isinstance(_lowerCamelCase , int ) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
_lowerCAmelCase : int = float(_lowerCamelCase )
_lowerCAmelCase : int = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
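# A hedged numerical check of the truncated series above:
#   sin(x) = sum_{r>=0} (-1)**r * x**(2r + 1) / (2r + 1)!
#   cos(x) = sum_{r>=0} (-1)**r * x**(2r) / (2r)!
# After reducing theta modulo 2*pi, 30 terms agree with math.sin / math.cos to
# far better than 1e-9.
from math import cos, isclose, sin

assert isclose(maclaurin_sin(10), sin(10), abs_tol=1E-9)
assert isclose(maclaurin_cos(-5), cos(-5), abs_tol=1E-9)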
| 707 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
_lowerCAmelCase : str = ksize + 1
_lowerCAmelCase : List[str] = np.zeros((ksize, ksize) , dtype=np.float64 )
# each value
for y in range(_lowerCamelCase ):
for x in range(_lowerCamelCase ):
# distance from center
_lowerCAmelCase : int = x - ksize // 2
_lowerCAmelCase : Dict = y - ksize // 2
# degree to radiant
_lowerCAmelCase : List[Any] = theta / 180 * np.pi
_lowerCAmelCase : int = np.cos(_theta )
_lowerCAmelCase : Optional[int] = np.sin(_theta )
# get kernel x
_lowerCAmelCase : int = cos_theta * px + sin_theta * py
# get kernel y
_lowerCAmelCase : str = -sin_theta * px + cos_theta * py
# fill kernel
_lowerCAmelCase : Union[str, Any] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread("../image_data/lena.jpg")
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filter2D(gray, CV_8UC3, kernel_aa)
_snake_case = out / out.max() * 255
_snake_case = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
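# A hedged standalone check of the kernel builder above: an even ksize is
# bumped to the next odd size so the kernel has a well-defined centre pixel.
demo_kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
assert demo_kernel.shape == (11, 11)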
| 658 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 708 |
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
for i in range(1 , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = collection[i]
_lowerCAmelCase : str = 0
_lowerCAmelCase : Union[str, Any] = i - 1
while low <= high:
_lowerCAmelCase : List[str] = (low + high) // 2
if val < collection[mid]:
_lowerCAmelCase : Optional[int] = mid - 1
else:
_lowerCAmelCase : List[str] = mid + 1
for j in range(_lowerCamelCase , _lowerCamelCase , -1 ):
_lowerCAmelCase : int = collection[j - 1]
_lowerCAmelCase : Optional[int] = val
return collection
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
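# A hedged complexity note with a quick check: binary search trims comparisons
# to O(log i) per element, but the shifting still costs O(n) per insertion, so
# the worst case remains O(n^2) like plain insertion sort.
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
assert binary_insertion_sort([]) == []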
| 658 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'sentencepiece.bpe.model'}
_snake_case = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
_snake_case = {
'camembert-base': 512,
}
_snake_case = '▁'
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self, __a, __a="<s>", __a="</s>", __a="</s>", __a="<s>", __a="<unk>", __a="<pad>", __a="<mask>", __a=["<s>NOTUSED", "</s>NOTUSED"], __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = AddedToken(_lowerCAmelCase, lstrip=_lowerCAmelCase, rstrip=_lowerCAmelCase) if isinstance(_lowerCAmelCase, str) else mask_token
_lowerCAmelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase, eos_token=_lowerCAmelCase, unk_token=_lowerCAmelCase, sep_token=_lowerCAmelCase, cls_token=_lowerCAmelCase, pad_token=_lowerCAmelCase, mask_token=_lowerCAmelCase, additional_special_tokens=_lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_lowerCAmelCase, )
_lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(_lowerCAmelCase))
_lowerCAmelCase : Optional[Any] = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>)
_lowerCAmelCase : int = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
_lowerCAmelCase : Tuple = len(self.fairseq_tokens_to_ids)
_lowerCAmelCase : Optional[Any] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
_lowerCAmelCase : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
_lowerCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case__ ( self, __a, __a = None, __a = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase, token_ids_a=_lowerCAmelCase, already_has_special_tokens=_lowerCAmelCase)
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase)) + [1]
return [1] + ([0] * len(_lowerCAmelCase)) + [1, 1] + ([0] * len(_lowerCAmelCase)) + [1]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = [self.sep_token_id]
_lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def snake_case__ ( self):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = {self.convert_ids_to_tokens(_lowerCAmelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def snake_case__ ( self, __a):
'''simple docstring'''
return self.sp_model.encode(_lowerCAmelCase, out_type=_lowerCAmelCase)
def snake_case__ ( self, __a):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_lowerCAmelCase) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase)
def snake_case__ ( self, __a):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[int] = ""
_lowerCAmelCase : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCAmelCase) + token
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : Tuple = []
else:
current_sub_tokens.append(_lowerCAmelCase)
_lowerCAmelCase : List[str] = False
out_string += self.sp_model.decode(_lowerCAmelCase)
return out_string.strip()
def __getstate__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
_lowerCAmelCase : List[Any] = {}
_lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
_lowerCAmelCase : Optional[int] = os.path.join(
_lowerCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowerCAmelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, _lowerCAmelCase)
elif not os.path.isfile(self.vocab_file):
with open(_lowerCAmelCase, "wb") as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase)
return (out_vocab_file,)
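# Standalone sketch of the fairseq-offset id mapping used above: ids 0-3 are
# reserved control tokens, so every sentencepiece piece id is shifted by the
# size of that table to keep the two vocabularies disjoint. The piece id below
# is illustrative, not taken from a real model.
fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
fairseq_offset = len(fairseq_tokens_to_ids)
sp_piece_id = 7  # pretend sentencepiece id for some subword
print(fairseq_offset + sp_piece_id)  # 11 -> id in the combined vocabulary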
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
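# Standalone mirror of the stage-name derivation above: depths of length N
# yield a "stem" entry plus N numbered stages.
depths = [2, 2, 6, 2]
print(["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)])
# ['stem', 'stage1', 'stage2', 'stage3', 'stage4']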
| 658 | 0 |
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_snake_case = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def A ( _lowerCamelCase ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
    _lowerCAmelCase : List[Any] = _lowerCamelCase.config.getoption("--make-reports" )
    if _lowerCAmelCase:
        pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCAmelCase )
| 710 |
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def count_of_possible_combinations(_lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_lowerCamelCase , _lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase : Optional[int] = sum(
count_of_possible_combinations_with_dp_array(target - item , _lowerCamelCase )
for item in array )
_lowerCAmelCase : Any = answer
return answer
_lowerCAmelCase : List[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [0] * (target + 1)
_lowerCAmelCase : List[str] = 1
for i in range(1 , target + 1 ):
for j in range(_lowerCamelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
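    # Worked check of the bottom-up recurrence above: ordered sequences drawn
    # from [1, 2, 5] summing to 5 are 1+1+1+1+1, the four arrangements of
    # 1+1+1+2, the three arrangements of 1+2+2, and 5 itself -- nine in total.
    def combination_sum_iv_check(array, target):
        dp = [0] * (target + 1)
        dp[0] = 1
        for i in range(1, target + 1):
            for item in array:
                if i - item >= 0:
                    dp[i] += dp[i - item]
        return dp[target]
    print(combination_sum_iv_check([1, 2, 5], 5))  # 9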
| 658 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_snake_case = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def A ( _lowerCamelCase ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return max(metric_fn(_lowerCAmelCase , _lowerCAmelCase ) for gt in ground_truths )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
_lowerCAmelCase : Union[str, Any] = []
if args.gold_data_mode == "qa":
_lowerCAmelCase : Optional[Any] = pd.read_csv(_lowerCAmelCase , sep="\t" , header=_lowerCAmelCase )
for answer_list in data[1]:
_lowerCAmelCase : Union[str, Any] = ast.literal_eval(_lowerCAmelCase )
answers.append(_lowerCAmelCase )
else:
_lowerCAmelCase : str = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
_lowerCAmelCase : List[Any] = [[reference] for reference in references]
_lowerCAmelCase : Union[str, Any] = 0
for prediction, ground_truths in zip(_lowerCAmelCase , _lowerCAmelCase ):
total += 1
em += metric_max_over_ground_truths(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
fa += metric_max_over_ground_truths(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase : Dict = 100.0 * em / total
_lowerCAmelCase : Dict = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = args.k
_lowerCAmelCase : Optional[Any] = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
_lowerCAmelCase : Union[str, Any] = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
_lowerCAmelCase : Optional[Any] = 0
for hypo, reference in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase : Union[str, Any] = set(hypo.split("\t" )[:k] )
_lowerCAmelCase : List[Any] = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_lowerCAmelCase : List[Any] = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def strip_title(_lowerCamelCase ):
if title.startswith("\"" ):
_lowerCAmelCase : Dict = title[1:]
if title.endswith("\"" ):
_lowerCAmelCase : List[str] = title[:-1]
return title
_lowerCAmelCase : str = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_lowerCAmelCase , return_tensors="pt" , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , )["input_ids"].to(args.device )
_lowerCAmelCase : Any = rag_model.rag.question_encoder(_lowerCAmelCase )
_lowerCAmelCase : str = question_enc_outputs[0]
_lowerCAmelCase : List[str] = rag_model.retriever(
_lowerCAmelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
_lowerCAmelCase : Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_lowerCAmelCase : str = []
for docs in all_docs:
_lowerCAmelCase : Union[str, Any] = [strip_title(_lowerCAmelCase ) for title in docs["title"]]
provenance_strings.append("\t".join(_lowerCAmelCase ) )
return provenance_strings
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_lowerCAmelCase , return_tensors="pt" , padding=_lowerCAmelCase , truncation=_lowerCAmelCase )
_lowerCAmelCase : Optional[int] = inputs_dict.input_ids.to(args.device )
_lowerCAmelCase : Any = inputs_dict.attention_mask.to(args.device )
_lowerCAmelCase : Dict = rag_model.generate( # rag_model overwrites generate
_lowerCAmelCase , attention_mask=_lowerCAmelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_lowerCAmelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_lowerCAmelCase : str = rag_model.retriever.generator_tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
if args.print_predictions:
for q, a in zip(_lowerCAmelCase , _lowerCAmelCase ):
logger.info("Q: {} - A: {}".format(_lowerCAmelCase , _lowerCAmelCase ) )
return answers
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=_lowerCAmelCase , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=_lowerCAmelCase , choices=["exact", "compressed", "legacy"] , type=_lowerCAmelCase , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=_lowerCAmelCase , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=_lowerCAmelCase , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=_lowerCAmelCase , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=_lowerCAmelCase , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=_lowerCAmelCase , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=_lowerCAmelCase , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=_lowerCAmelCase , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=_lowerCAmelCase , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=_lowerCAmelCase , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
_lowerCAmelCase : Tuple = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {}
if args.model_type is None:
_lowerCAmelCase : List[Any] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
_lowerCAmelCase : Any = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
_lowerCAmelCase : Dict = args.n_docs
if args.index_name is not None:
_lowerCAmelCase : List[Any] = args.index_name
if args.index_path is not None:
_lowerCAmelCase : Union[str, Any] = args.index_path
else:
_lowerCAmelCase : Optional[Any] = BartForConditionalGeneration
_lowerCAmelCase : Tuple = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , _lowerCAmelCase )
_lowerCAmelCase : str = get_scores if args.eval_mode == "e2e" else get_precision_at_k
_lowerCAmelCase : str = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(_lowerCAmelCase , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(_lowerCAmelCase ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
_lowerCAmelCase : List[Any] = RagRetriever.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
_lowerCAmelCase : Optional[Any] = model_class.from_pretrained(_lowerCAmelCase , retriever=_lowerCAmelCase , **_lowerCAmelCase )
model.retriever.init_retrieval()
else:
_lowerCAmelCase : str = model_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
_lowerCAmelCase : Optional[Any] = []
for line in tqdm(_lowerCAmelCase ):
questions.append(line.strip() )
if len(_lowerCAmelCase ) == args.eval_batch_size:
_lowerCAmelCase : Optional[Any] = evaluate_batch_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
preds_file.write("\n".join(_lowerCAmelCase ) + "\n" )
preds_file.flush()
_lowerCAmelCase : str = []
if len(_lowerCAmelCase ) > 0:
_lowerCAmelCase : Optional[int] = evaluate_batch_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
preds_file.write("\n".join(_lowerCAmelCase ) )
preds_file.flush()
score_fn(_lowerCAmelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_snake_case = get_args()
main(args)
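# Standalone sketch of the metric_max_over_ground_truths pattern used above:
# each prediction is scored against every reference answer and the best score
# is kept. Names and data here are illustrative, not taken from the script.
def exact_match(prediction, ground_truth):
    return float(prediction.strip().lower() == ground_truth.strip().lower())
def best_over_references(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
print(best_over_references(exact_match, "Paris", ["paris", "Lyon"]))  # 1.0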
| 711 |
import string
def A ( _lowerCamelCase ):
'''simple docstring'''
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in _lowerCamelCase:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}" )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = input("Encrypted message: " )
_lowerCAmelCase : Dict = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
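    # Quick check of the shift-back rule above on ROT13 input: "GUVF" decodes
    # to "THIS" when each letter is moved back 13 places modulo 26.
    _rot = string.ascii_uppercase
    print("".join(_rot[(_rot.find(c) - 13) % 26] for c in "GUVF"))  # THIS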
| 658 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase):
lowerCamelCase__ = CodeGenTokenizer
lowerCamelCase__ = CodeGenTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = {"""add_prefix_space""": True}
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Any = dict(zip(_a, range(len(_a))))
_lowerCAmelCase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(_a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(_a))
def snake_case__ ( self, **__a):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CodeGenTokenizer.from_pretrained(self.tmpdirname, **_a)
def snake_case__ ( self, **__a):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **_a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = """lower newer"""
_lowerCAmelCase : Optional[int] = """lower newer"""
return input_text, output_text
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
_lowerCAmelCase : Tuple = """lower newer"""
_lowerCAmelCase : List[Any] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(_a, add_prefix_space=_a)
self.assertListEqual(_a, _a)
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a), _a)
def snake_case__ ( self):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : str = self.get_rust_tokenizer(add_prefix_space=_a)
_lowerCAmelCase : str = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(_a, add_prefix_space=_a)
_lowerCAmelCase : int = rust_tokenizer.tokenize(_a)
self.assertListEqual(_a, _a)
# Testing conversion to ids without special tokens
_lowerCAmelCase : int = tokenizer.encode(_a, add_special_tokens=_a, add_prefix_space=_a)
_lowerCAmelCase : Union[str, Any] = rust_tokenizer.encode(_a, add_special_tokens=_a)
self.assertListEqual(_a, _a)
# Testing conversion to ids with special tokens
_lowerCAmelCase : Tuple = self.get_rust_tokenizer(add_prefix_space=_a)
_lowerCAmelCase : List[str] = tokenizer.encode(_a, add_prefix_space=_a)
_lowerCAmelCase : str = rust_tokenizer.encode(_a)
self.assertListEqual(_a, _a)
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_a), _a)
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
pass
def snake_case__ ( self, __a=15):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
_lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(_a, **_a)
# Simple input
_lowerCAmelCase : int = """This is a simple input"""
_lowerCAmelCase : Optional[Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[Any] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : int = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_a, tokenizer_r.encode, _a, max_length=_a, padding="max_length")
# Simple input
self.assertRaises(_a, tokenizer_r.encode_plus, _a, max_length=_a, padding="max_length")
# Simple input
self.assertRaises(
_a, tokenizer_r.batch_encode_plus, _a, max_length=_a, padding="max_length", )
# Pair input
self.assertRaises(_a, tokenizer_r.encode, _a, max_length=_a, padding="max_length")
# Pair input
self.assertRaises(_a, tokenizer_r.encode_plus, _a, max_length=_a, padding="max_length")
# Pair input
self.assertRaises(
_a, tokenizer_r.batch_encode_plus, _a, max_length=_a, padding="max_length", )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[Any] = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : List[str] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : List[str] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : List[Any] = tokenizer.pad_token_id
_lowerCAmelCase : List[Any] = tokenizer(_a, padding="max_length", max_length=30, return_tensors="np")
_lowerCAmelCase : List[Any] = tokenizer(_a, padding=_a, truncate=_a, return_tensors="np")
_lowerCAmelCase : List[str] = tokenizer(*_a, padding="max_length", max_length=60, return_tensors="np")
_lowerCAmelCase : Tuple = tokenizer(_a, padding=_a, truncate=_a, return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1], 30)
self.assertTrue(pad_token_id in out_s["input_ids"])
self.assertTrue(0 in out_s["attention_mask"])
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1], 33)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0])
self.assertFalse(0 in out_sa["attention_mask"][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1])
self.assertTrue(0 in out_sa["attention_mask"][1])
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1], 60)
self.assertTrue(pad_token_id in out_p["input_ids"])
self.assertTrue(0 in out_p["attention_mask"])
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1], 52)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0])
self.assertFalse(0 in out_pa["attention_mask"][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1])
self.assertTrue(0 in out_pa["attention_mask"][1])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = """$$$"""
_lowerCAmelCase : Dict = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=_a, add_bos_token=_a)
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[Any] = tokenizer.bos_token_id
_lowerCAmelCase : List[Any] = tokenizer(_a)
_lowerCAmelCase : Tuple = tokenizer(_a)
self.assertEqual(out_s.input_ids[0], _a)
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
_lowerCAmelCase : Union[str, Any] = tokenizer.decode(out_s.input_ids)
_lowerCAmelCase : Dict = tokenizer.batch_decode(out_sa.input_ids)
self.assertEqual(decode_s.split()[0], _a)
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
_lowerCAmelCase : Tuple = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : Tuple = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : List[Any] = tokenizer.encode(_a)
_lowerCAmelCase : Dict = ["""^#""", re.escape("<|endoftext|>"), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(_a, truncate_before_pattern=_a)
self.assertEqual(_a, _a)
def snake_case__ ( self):
'''simple docstring'''
pass
| 712 |
import requests
from bs4 import BeautifulSoup
def A ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
_lowerCAmelCase : str = soup.findAll("h1" )
_lowerCAmelCase : Optional[int] = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
_snake_case = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_snake_case = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
_snake_case = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("int32"),
"references": datasets.Value("int32"),
}), reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
], )
def snake_case__ ( self, __a, __a, __a=None):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__a, __a, sample_weight=__a)),
}
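# The first docstring example above, checked directly against sklearn
# (matthews_corrcoef is already imported at the top of this file):
print(round(matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2))  # 0.54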
| 713 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
def __init__( self, __a, __a):
'''simple docstring'''
if len(__a) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1.")
_lowerCAmelCase : list[float] = list(__a)
_lowerCAmelCase : Any = degree
def __add__( self, __a):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_lowerCAmelCase : Dict = self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree, __a)
else:
_lowerCAmelCase : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree, __a)
def __sub__( self, __a):
'''simple docstring'''
return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self):
'''simple docstring'''
return Polynomial(self.degree, [-c for c in self.coefficients])
def __mul__( self, __a):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(__a)
return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * self.degree
for i in range(self.degree):
_lowerCAmelCase : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, __a)
def snake_case__ ( self, __a = 0):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + 2)
_lowerCAmelCase : Optional[Any] = constant
for i in range(self.degree + 1):
_lowerCAmelCase : Dict = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, __a)
def __eq__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self, __a):
'''simple docstring'''
return not self.__eq__(__a)
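# Sanity-check sketch for the operator overloads above. It assumes the upstream
# bindings (the class bound to the name Polynomial, and a polynomial_a argument)
# that the method bodies themselves reference. Coefficients are stored lowest
# power first; __str__ prints highest power first.
p = Polynomial(2, [1, 0, 3])  # 3x^2 + 1
q = Polynomial(1, [0, 2])     # 2x
print(p + q)                  # 3x^2 + 2x + 1
print(p * q)                  # 6x^3 + 2x
print(p - p == Polynomial(2, [0, 0, 0]))  # True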
| 658 | 0 |
from manim import *
class UpperCAmelCase_ ( a):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = Rectangle(height=0.5, width=0.5)
_lowerCAmelCase : Dict = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
_lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(6)]
_lowerCAmelCase : Any = [mem.copy() for i in range(6)]
_lowerCAmelCase : Any = VGroup(*_UpperCAmelCase).arrange(_UpperCAmelCase, buff=0)
_lowerCAmelCase : List[str] = VGroup(*_UpperCAmelCase).arrange(_UpperCAmelCase, buff=0)
_lowerCAmelCase : Dict = VGroup(_UpperCAmelCase, _UpperCAmelCase).arrange(_UpperCAmelCase, buff=0)
_lowerCAmelCase : Tuple = Text("CPU", font_size=24)
_lowerCAmelCase : str = Group(_UpperCAmelCase, _UpperCAmelCase).arrange(_UpperCAmelCase, buff=0.5, aligned_edge=_UpperCAmelCase)
cpu.move_to([-2.5, -0.5, 0])
self.add(_UpperCAmelCase)
_lowerCAmelCase : int = [mem.copy() for i in range(4)]
_lowerCAmelCase : Tuple = VGroup(*_UpperCAmelCase).arrange(_UpperCAmelCase, buff=0)
_lowerCAmelCase : int = Text("GPU", font_size=24)
_lowerCAmelCase : Tuple = Group(_UpperCAmelCase, _UpperCAmelCase).arrange(_UpperCAmelCase, buff=0.5, aligned_edge=_UpperCAmelCase)
gpu.move_to([-1, -1, 0])
self.add(_UpperCAmelCase)
_lowerCAmelCase : List[str] = [mem.copy() for i in range(6)]
_lowerCAmelCase : Union[str, Any] = VGroup(*_UpperCAmelCase).arrange(_UpperCAmelCase, buff=0)
_lowerCAmelCase : Tuple = Text("Model", font_size=24)
_lowerCAmelCase : Tuple = Group(_UpperCAmelCase, _UpperCAmelCase).arrange(_UpperCAmelCase, buff=0.5, aligned_edge=_UpperCAmelCase)
model.move_to([3, -1.0, 0])
self.add(_UpperCAmelCase)
_lowerCAmelCase : Tuple = []
for i, rect in enumerate(_UpperCAmelCase):
rect.set_stroke(_UpperCAmelCase)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(_UpperCAmelCase, opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=_UpperCAmelCase)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0], direction=_UpperCAmelCase, buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1], direction=_UpperCAmelCase, buff=0.0)
self.add(_UpperCAmelCase)
cpu_targs.append(_UpperCAmelCase)
_lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6)]
_lowerCAmelCase : str = VGroup(*_UpperCAmelCase).arrange(_UpperCAmelCase, buff=0)
_lowerCAmelCase : Dict = Text("Loaded Checkpoint", font_size=24)
_lowerCAmelCase : Optional[int] = Group(_UpperCAmelCase, _UpperCAmelCase).arrange(_UpperCAmelCase, aligned_edge=_UpperCAmelCase, buff=0.4)
checkpoint.move_to([3, 0.5, 0])
_lowerCAmelCase : Tuple = Square(side_length=2.2)
key.move_to([-5, 2, 0])
_lowerCAmelCase : Any = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, )
key_text.move_to([-5, 2.4, 0])
self.add(_UpperCAmelCase, _UpperCAmelCase)
_lowerCAmelCase : Tuple = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18, )
blue_text.next_to(_UpperCAmelCase, DOWN * 2.4, aligned_edge=key_text.get_left())
_lowerCAmelCase : Any = MarkupText(
f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.", font_size=24, )
step_a.move_to([2, 2, 0])
self.play(Write(_UpperCAmelCase), Write(_UpperCAmelCase))
self.play(Write(_UpperCAmelCase, run_time=1), Create(_UpperCAmelCase, run_time=1))
_lowerCAmelCase : Any = []
_lowerCAmelCase : Union[str, Any] = []
for i, rect in enumerate(_UpperCAmelCase):
_lowerCAmelCase : int = fill.copy().set_fill(_UpperCAmelCase, opacity=0.7)
target.move_to(_UpperCAmelCase)
first_animations.append(GrowFromCenter(_UpperCAmelCase, run_time=1))
_lowerCAmelCase : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(_UpperCAmelCase, run_time=1.5))
self.play(*_UpperCAmelCase)
self.play(*_UpperCAmelCase)
self.wait()
| 714 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=3_2000, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=True, __a="bi", __a=0.02, __a=1E-12, __a=0.1, __a=512, __a=None, __a=True, __a=False, __a=False, __a=-1, __a=False, __a="last", __a=True, __a="tanh", __a=0.1, __a=5, __a=5, __a=5, __a=1, __a=2, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Tuple = n_layer
_lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
_lowerCAmelCase : Optional[int] = d_model // n_head
_lowerCAmelCase : List[str] = ff_activation
_lowerCAmelCase : Tuple = d_inner
_lowerCAmelCase : List[Any] = untie_r
_lowerCAmelCase : List[str] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = dropout
_lowerCAmelCase : Optional[int] = mem_len
_lowerCAmelCase : Union[str, Any] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : List[str] = clamp_len
_lowerCAmelCase : Any = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : Optional[Any] = summary_activation
_lowerCAmelCase : Tuple = summary_last_dropout
_lowerCAmelCase : Union[str, Any] = start_n_top
_lowerCAmelCase : Optional[int] = end_n_top
_lowerCAmelCase : Tuple = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def snake_case__ ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase_ ( __lowerCAmelCase):
lowerCamelCase__ = 'decision_transformer'
lowerCamelCase__ = ['past_key_values']
lowerCamelCase__ = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=17, __a=4, __a=128, __a=4096, __a=True, __a=1, __a=1024, __a=3, __a=1, __a=None, __a="relu", __a=0.1, __a=0.1, __a=0.1, __a=1E-5, __a=0.02, __a=True, __a=True, __a=5_0256, __a=5_0256, __a=False, __a=False, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = state_dim
_lowerCAmelCase : str = act_dim
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : Any = max_ep_len
_lowerCAmelCase : Union[str, Any] = action_tanh
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Dict = n_positions
_lowerCAmelCase : Dict = n_layer
_lowerCAmelCase : str = n_head
_lowerCAmelCase : Union[str, Any] = n_inner
_lowerCAmelCase : Any = activation_function
_lowerCAmelCase : int = resid_pdrop
_lowerCAmelCase : int = embd_pdrop
_lowerCAmelCase : Dict = attn_pdrop
_lowerCAmelCase : List[str] = layer_norm_epsilon
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Tuple = scale_attn_weights
_lowerCAmelCase : str = use_cache
_lowerCAmelCase : str = scale_attn_by_inverse_layer_idx
_lowerCAmelCase : int = reorder_and_upcast_attn
_lowerCAmelCase : Optional[int] = bos_token_id
_lowerCAmelCase : Dict = eos_token_id
super().__init__(bos_token_id=_UpperCamelCase, eos_token_id=_UpperCamelCase, **_UpperCamelCase)
| 715 |
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 0 |
import os
def A ( ):
'''simple docstring'''
    with open(os.path.dirname(__file__ ) + "/p022_names.txt" ) as file:
        names = str(file.readlines()[0] )
    names = names.replace("\"" , "" ).split("," )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
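    # Worked example from the Project Euler #22 statement: COLIN is worth
    # 3 + 15 + 12 + 9 + 14 = 53, and at position 938 in the alphabetical list
    # it scores 938 * 53 = 49714.
    print(sum(ord(letter) - 64 for letter in "COLIN") * 938)  # 49714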
| 716 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
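# Usage sketch (an assumption: the class above corresponds to transformers'
# UperNetConfig; requires transformers to be installed):
#
#   config = UperNetConfig()                    # no backbone_config given
#   print(config.backbone_config.model_type)    # 'resnet' default backbone
#   print(config.to_dict()["backbone_config"]["out_features"])
#   # ['stage1', 'stage2', 'stage3', 'stage4']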
| 658 | 0 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase_ :
@property
def snake_case__ ( self):
'''simple docstring'''
return self.get_dummy_input()
@property
def snake_case__ ( self):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
def snake_case__ ( self, __a=True, __a=False, __a=False, __a=False, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 4
_lowerCAmelCase : List[str] = 32
_lowerCAmelCase : Any = (32, 32)
_lowerCAmelCase : str = torch.manual_seed(0)
_lowerCAmelCase : int = torch.device(__lowerCamelCase)
_lowerCAmelCase : List[str] = (batch_size, num_channels) + sizes
_lowerCAmelCase : Dict = randn_tensor(__lowerCamelCase, generator=__lowerCamelCase, device=__lowerCamelCase)
_lowerCAmelCase : int = {"hidden_states": hidden_states}
if include_temb:
_lowerCAmelCase : Any = 128
_lowerCAmelCase : List[str] = randn_tensor((batch_size, temb_channels), generator=__lowerCamelCase, device=__lowerCamelCase)
if include_res_hidden_states_tuple:
_lowerCAmelCase : str = torch.manual_seed(1)
_lowerCAmelCase : Tuple = (randn_tensor(__lowerCamelCase, generator=__lowerCamelCase, device=__lowerCamelCase),)
if include_encoder_hidden_states:
_lowerCAmelCase : Dict = floats_tensor((batch_size, 32, 32)).to(__lowerCamelCase)
if include_skip_sample:
_lowerCAmelCase : Optional[int] = randn_tensor(((batch_size, 3) + sizes), generator=__lowerCamelCase, device=__lowerCamelCase)
return dummy_input
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
_lowerCAmelCase : Dict = 32
if self.block_type == "mid":
init_dict.pop("out_channels")
_lowerCAmelCase : str = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : str = self.prepare_init_args_and_inputs_for_common()
_lowerCAmelCase : List[Any] = self.block_class(**__lowerCamelCase)
unet_block.to(__lowerCamelCase)
unet_block.eval()
with torch.no_grad():
_lowerCAmelCase : int = unet_block(**__lowerCamelCase)
if isinstance(__lowerCamelCase, __lowerCamelCase):
_lowerCAmelCase : Union[str, Any] = output[0]
self.assertEqual(output.shape, self.output_shape)
_lowerCAmelCase : Any = output[0, -1, -3:, -3:]
_lowerCAmelCase : Union[str, Any] = torch.tensor(__lowerCamelCase).to(__lowerCamelCase)
assert torch_all_close(output_slice.flatten(), __lowerCamelCase, atol=5E-3)
@unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.prepare_init_args_and_inputs_for_common()
_lowerCAmelCase : str = self.block_class(**__lowerCamelCase)
model.to(__lowerCamelCase)
model.train()
_lowerCAmelCase : Optional[int] = model(**__lowerCamelCase)
if isinstance(__lowerCamelCase, __lowerCamelCase):
_lowerCAmelCase : Optional[Any] = output[0]
_lowerCAmelCase : List[str] = torch.device(__lowerCamelCase)
_lowerCAmelCase : List[str] = randn_tensor(output.shape, device=__lowerCamelCase)
_lowerCAmelCase : Dict = torch.nn.functional.mse_loss(__lowerCamelCase, __lowerCamelCase)
loss.backward()
| 717 |
import base64
def A ( _lowerCamelCase ):
    '''simple docstring'''
    return base64.a85encode(_lowerCamelCase.encode("utf-8" ) )
def A ( _lowerCamelCase ):
    '''simple docstring'''
    return base64.a85decode(_lowerCamelCase ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
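    # Round-trip sketch for the Ascii85 helpers above; base64.a85encode and
    # base64.a85decode are the standard-library calls the fixed import provides.
    encoded = base64.a85encode("some text".encode("utf-8"))
    print(base64.a85decode(encoded).decode("utf-8"))  # some text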
| 658 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
| 658 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_snake_case = ['''gpt2''']
_snake_case = '''gpt2'''
if is_tf_available():
class UpperCAmelCase_ ( tf.Module):
def __init__( self, __a):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = tokenizer
_lowerCAmelCase : Any = AutoConfig.from_pretrained(__UpperCamelCase)
_lowerCAmelCase : int = TFGPTaLMHeadModel.from_config(__UpperCamelCase)
@tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer(__UpperCamelCase)
_lowerCAmelCase : Tuple = tokenized["input_ids"].to_tensor()
_lowerCAmelCase : Tuple = tf.cast(input_ids_dense > 0, tf.intaa)
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_lowerCAmelCase : int = self.model(input_ids=__UpperCamelCase, attention_mask=__UpperCamelCase)["logits"]
return outputs
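# Test suite comparing the in-graph TF tokenizer against the reference Python tokenizer on tricky inputs.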
@require_tf
@require_keras_nlp
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : int = [GPTaTokenizer.from_pretrained(__UpperCamelCase) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_lowerCAmelCase : Union[str, Any] = [TFGPTaTokenizer.from_pretrained(__UpperCamelCase) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_lowerCAmelCase : Any = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
_lowerCAmelCase : Any = list(zip(self.test_sentences, self.test_sentences[::-1]))
def snake_case__ ( self):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
for test_inputs in self.test_sentences:
_lowerCAmelCase : str = tokenizer([test_inputs], return_tensors="tf")
_lowerCAmelCase : Optional[int] = tf_tokenizer([test_inputs])
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_lowerCAmelCase : Any = python_outputs[key].numpy()
_lowerCAmelCase : Union[str, Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
self.assertTrue(tf.reduce_all(tf.cast(__UpperCamelCase, tf.intaa) == tf_outputs_values))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Any = tf.function(__UpperCamelCase)
for test_inputs in self.test_sentences:
_lowerCAmelCase : List[str] = tf.constant(__UpperCamelCase)
_lowerCAmelCase : Dict = compiled_tokenizer(__UpperCamelCase)
_lowerCAmelCase : Optional[Any] = tf_tokenizer(__UpperCamelCase)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : str = ModelToSave(tokenizer=__UpperCamelCase)
_lowerCAmelCase : int = tf.convert_to_tensor([self.test_sentences[0]])
_lowerCAmelCase : Union[str, Any] = model.serving(__UpperCamelCase) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCAmelCase : Optional[Any] = Path(__UpperCamelCase) / "saved.model"
tf.saved_model.save(__UpperCamelCase, __UpperCamelCase, signatures={"serving_default": model.serving})
_lowerCAmelCase : int = tf.saved_model.load(__UpperCamelCase)
_lowerCAmelCase : List[Any] = loaded_model.signatures["serving_default"](__UpperCamelCase)["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Dict = tf.convert_to_tensor([self.test_sentences[0]])
_lowerCAmelCase : Any = tf_tokenizer(__UpperCamelCase) # Build model with some sample inputs
_lowerCAmelCase : Any = tf_tokenizer.get_config()
_lowerCAmelCase : Tuple = TFGPTaTokenizer.from_config(__UpperCamelCase)
_lowerCAmelCase : List[Any] = model_from_config(__UpperCamelCase)
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_lowerCAmelCase : str = 12_3123
for max_length in [3, 5, 1024]:
_lowerCAmelCase : List[str] = tf.convert_to_tensor([self.test_sentences[0]])
_lowerCAmelCase : Optional[int] = tf_tokenizer(__UpperCamelCase, max_length=__UpperCamelCase)
_lowerCAmelCase : Any = out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 719 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
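# Number of output labels per GLUE task, used to size the classification head of the converted model.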
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
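# Convert a TensorFlow XLNet checkpoint into a PyTorch model, choosing the head from the finetuning task.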
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = XLNetConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : Any = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : Any = finetuning_task
_lowerCAmelCase : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Union[str, Any] = XLNetForSequenceClassification(_lowerCamelCase )
elif "squad" in finetuning_task:
_lowerCAmelCase : Union[str, Any] = finetuning_task
_lowerCAmelCase : Any = XLNetForQuestionAnswering(_lowerCamelCase )
else:
_lowerCAmelCase : Union[str, Any] = XLNetLMHeadModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(F"Save PyTorch model to {os.path.abspath(_lowerCamelCase )}" )
torch.save(model.state_dict() , _lowerCamelCase )
print(F"Save configuration file to {os.path.abspath(_lowerCamelCase )}" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 658 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
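# Tokenizer tests for BARTpho, which pairs a SentencePiece BPE model with a small monolingual vocab file.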
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ = BartphoTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : str = ["▁This", "▁is", "▁a", "▁t", "est"]
_lowerCAmelCase : Dict = dict(zip(snake_case_, range(len(snake_case_))))
_lowerCAmelCase : Tuple = {"unk_token": "<unk>"}
_lowerCAmelCase : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(f"{token} {vocab_tokens[token]}\n")
_lowerCAmelCase : Dict = BartphoTokenizer(snake_case_, self.monolingual_vocab_file, **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self, **__a):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname, **snake_case_)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = "This is a là test"
_lowerCAmelCase : Optional[int] = "This is a<unk><unk> test"
return input_text, output_text
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = BartphoTokenizer(snake_case_, self.monolingual_vocab_file, **self.special_tokens_map)
_lowerCAmelCase : Optional[int] = "This is a là test"
_lowerCAmelCase : Optional[Any] = "▁This ▁is ▁a ▁l à ▁t est".split()
_lowerCAmelCase : Dict = tokenizer.tokenize(snake_case_)
self.assertListEqual(snake_case_, snake_case_)
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : Optional[int] = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_), snake_case_)
| 720 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
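# datasets.Metric wrapper around the reference BLEU implementation imported from the vendored nmt_bleu module.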
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 658 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
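# Decorators for registering key handlers on methods; the KeyHandler class dispatches a pressed key to its handler.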
def A ( _lowerCamelCase ):
'''simple docstring'''
def decorator(_lowerCamelCase ):
_lowerCAmelCase : Dict = getattr(__UpperCamelCase , "handle_key" , [] )
handle += [key]
setattr(__UpperCamelCase , "handle_key" , __UpperCamelCase )
return func
return decorator
def A ( *_lowerCamelCase ):
'''simple docstring'''
def decorator(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = getattr(__UpperCamelCase , "handle_key" , [] )
handle += keys
setattr(__UpperCamelCase , "handle_key" , __UpperCamelCase )
return func
return decorator
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__):
def __new__( cls, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = super().__new__(cls, _lowercase, _lowercase, _lowercase)
if not hasattr(_lowercase, "key_handler"):
setattr(_lowercase, "key_handler", {})
setattr(_lowercase, "handle_input", KeyHandler.handle_input)
for value in attrs.values():
_lowerCAmelCase : List[str] = getattr(_lowercase, "handle_key", [])
for key in handled_keys:
_lowerCAmelCase : List[str] = value
return new_cls
@staticmethod
def snake_case__ ( cls):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = get_character()
if char != KEYMAP["undefined"]:
_lowerCAmelCase : int = ord(_lowercase)
_lowerCAmelCase : Tuple = cls.key_handler.get(_lowercase)
if handler:
_lowerCAmelCase : Union[str, Any] = char
return handler(cls)
else:
return None
def A ( cls ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 721 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
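# Helpers for loading a taming-transformers VQGAN from a config/checkpoint and running image reconstructions.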
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = OmegaConf.load(_lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) )
return config
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if conf_path is None:
_lowerCAmelCase : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml"
_lowerCAmelCase : Tuple = load_config(_lowerCamelCase , display=_lowerCamelCase )
_lowerCAmelCase : str = VQModel(**config.model.params )
if ckpt_path is None:
_lowerCAmelCase : Optional[int] = "./model_checkpoints/vqgan_only.pt"
_lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
if ".ckpt" in ckpt_path:
_lowerCAmelCase : List[Any] = sd["state_dict"]
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
model.to(_lowerCamelCase )
del sd
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = model.encode(_lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
_lowerCAmelCase : int = model.decode(_lowerCamelCase )
return xrec
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = string.rsplit("." , 1 )
if reload:
_lowerCAmelCase : Dict = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def A ( _lowerCamelCase ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : str = instantiate_from_config(_lowerCamelCase )
if sd is not None:
model.load_state_dict(_lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if ckpt:
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : int = pl_sd["global_step"]
print(F"loaded model from global step {global_step}." )
else:
_lowerCAmelCase : Optional[int] = {"state_dict": None}
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"]
return model, global_step
| 658 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
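# Round-trip tests for the BLIP-2 processor: tokenizer + image-processor composition, save/load, and batch decode.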
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = tempfile.mkdtemp()
_lowerCAmelCase : Optional[int] = BlipImageProcessor()
_lowerCAmelCase : Union[str, Any] = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
_lowerCAmelCase : str = BlipaProcessor(A_, A_)
processor.save_pretrained(self.tmpdirname)
def snake_case__ ( self, **__a):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname, **A_).tokenizer
def snake_case__ ( self, **__a):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname, **A_).image_processor
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
_lowerCAmelCase : str = [Image.fromarray(np.moveaxis(A_, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Optional[Any] = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : str = self.get_image_processor(do_normalize=A_, padding_value=1.0)
_lowerCAmelCase : List[str] = BlipaProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=A_, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, A_)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, A_)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = BlipaProcessor(tokenizer=A_, image_processor=A_)
_lowerCAmelCase : Any = self.prepare_image_inputs()
_lowerCAmelCase : List[Any] = image_processor(A_, return_tensors="np")
_lowerCAmelCase : Any = processor(images=A_, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.get_image_processor()
_lowerCAmelCase : Optional[Any] = self.get_tokenizer()
_lowerCAmelCase : int = BlipaProcessor(tokenizer=A_, image_processor=A_)
_lowerCAmelCase : Any = "lower newer"
_lowerCAmelCase : int = processor(text=A_)
_lowerCAmelCase : Optional[int] = tokenizer(A_, return_token_type_ids=A_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_image_processor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : int = BlipaProcessor(tokenizer=A_, image_processor=A_)
_lowerCAmelCase : Union[str, Any] = "lower newer"
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Union[str, Any] = processor(text=A_, images=A_)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
# test if it raises when no input is passed
with pytest.raises(A_):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = BlipaProcessor(tokenizer=A_, image_processor=A_)
_lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : Optional[int] = processor.batch_decode(A_)
_lowerCAmelCase : Optional[Any] = tokenizer.batch_decode(A_)
self.assertListEqual(A_, A_)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : List[Any] = BlipaProcessor(tokenizer=A_, image_processor=A_)
_lowerCAmelCase : Tuple = "lower newer"
_lowerCAmelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCAmelCase : str = processor(text=A_, images=A_)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 700 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
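# RoCBert: BERT augmented with pronunciation and glyph-shape embeddings for robustness to typo-style attacks.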
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
| 658 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 701 |
from __future__ import annotations
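# Arithmetic mean of a non-empty list of numbers.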
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
import os
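# Project Euler 11: largest product of four adjacent entries (row, column, or diagonal) in the 20x20 grid.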
def A ( ):
'''simple docstring'''
    with open(os.path.dirname(__file__ ) + "/grid.txt" ) as f:
_lowerCAmelCase : str = [] # noqa: E741
for _ in range(20 ):
l.append([int(_lowerCamelCase ) for x in f.readline().split()] )
_lowerCAmelCase : str = 0
# right
for i in range(20 ):
for j in range(17 ):
_lowerCAmelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
_lowerCAmelCase : List[str] = temp
# down
for i in range(17 ):
for j in range(20 ):
_lowerCAmelCase : List[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
_lowerCAmelCase : Optional[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
_lowerCAmelCase : Dict = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
_lowerCAmelCase : Optional[Any] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
_lowerCAmelCase : Union[str, Any] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
_lowerCAmelCase : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 702 |
def A ( _lowerCamelCase ):
'''simple docstring'''
    if length <= 0 or not isinstance(_lowerCamelCase , int ):
raise ValueError("Length must be a positive integer." )
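    # The n-th hexagonal number is n * (2n - 1).
    # e.g. hexagonal_numbers(5) == [0, 1, 6, 15, 28]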
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 0 |
from __future__ import annotations
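# Recursive binary search on a sorted list; returns True when the item is found.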
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if len(snake_case_ ) == 0:
return False
_lowerCAmelCase : Dict = len(snake_case_ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , snake_case_ )
else:
return binary_search(a_list[midpoint + 1 :] , snake_case_ )
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by comma:\n").strip()
_snake_case = [int(item.strip()) for item in user_input.split(",")]
_snake_case = int(input("Enter the number to be found in the list:\n").strip())
_snake_case = """""" if binary_search(sequence, target) else """not """
print(f'''{target} was {not_str}found in {sequence}''')
| 703 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
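# PCA and Fisher linear discriminant analysis on labelled feature matrices, with pytest sanity checks.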
def A ( _lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
        # Center the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if features.any():
_lowerCAmelCase : List[Any] = features.mean(1 )
# Center the dataset
_lowerCAmelCase : List[Any] = features - np.reshape(_lowerCamelCase , (data_mean.size, 1) )
_lowerCAmelCase : Optional[Any] = np.dot(_lowerCamelCase , centered_data.T ) / features.shape[1]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = np.linalg.eigh(_lowerCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCAmelCase : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCAmelCase : List[Any] = np.dot(filtered_eigenvectors.T , _lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
_lowerCAmelCase , _lowerCAmelCase : List[str] = eigh(
covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
_lowerCAmelCase : List[str] = eigenvectors[:, ::-1][:, :dimensions]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = np.linalg.svd(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = svd_matrix[:, 0:dimensions]
_lowerCAmelCase : str = np.dot(filtered_svd_matrix.T , _lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Union[str, Any] = linear_discriminant_analysis(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Tuple = principal_component_analysis(_lowerCamelCase , _lowerCamelCase )
if not np.allclose(_lowerCamelCase , _lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
from __future__ import annotations
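# Rat-in-a-maze solver: depth-first backtracking that records the path in a parallel solutions grid.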
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = len(_lowerCamelCase )
    # We need to create a solutions grid to record the path.
_lowerCAmelCase : Optional[int] = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Tuple = run_maze(_lowerCamelCase , 0 , 0 , _lowerCamelCase )
if solved:
print("\n".join(str(_lowerCamelCase ) for row in solutions ) )
else:
print("No solution exists!" )
return solved
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = len(_lowerCamelCase )
# Final check point.
if i == j == (size - 1):
_lowerCAmelCase : Union[str, Any] = 1
return True
_lowerCAmelCase : Any = (not i < 0) and (not j < 0) # Check lower bounds
_lowerCAmelCase : Dict = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
_lowerCAmelCase : Tuple = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
_lowerCAmelCase : Optional[Any] = 1
# check for directions
if (
run_maze(_lowerCamelCase , i + 1 , _lowerCamelCase , _lowerCamelCase )
or run_maze(_lowerCamelCase , _lowerCamelCase , j + 1 , _lowerCamelCase )
or run_maze(_lowerCamelCase , i - 1 , _lowerCamelCase , _lowerCamelCase )
or run_maze(_lowerCamelCase , _lowerCamelCase , j - 1 , _lowerCamelCase )
):
return True
_lowerCAmelCase : Dict = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
import requests
from bsa import BeautifulSoup
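# Scrape the "Cited by" count for a publication from a Google Scholar lookup page.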
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , "html.parser" )
_lowerCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_lowerCAmelCase : str = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 0 |
'''simple docstring'''
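# Russian-peasant (shift-and-add) multiplication, plain and modulo c.
# e.g. 19 * 13 accumulates 19 + 76 + 152 = 247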
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
while b > 0:
if b & 1:
_lowerCAmelCase : Tuple = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 705 |
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = {1: 1}
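    # Memoize each computed Collatz chain length so shared suffixes are counted once.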
for inputa in range(2 , _lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Tuple = counter
if counter > pre_counter:
_lowerCAmelCase : Union[str, Any] = inputa
_lowerCAmelCase : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 658 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
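# Lazy import structure for the LiLT model, following the usual transformers __init__ layout.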
_snake_case = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
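# Translate one OpenAI Jukebox state-dict key into the transformers naming scheme.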
def A ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
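# Remap an entire checkpoint state dict, rewriting encoder/decoder/conditioner block keys via regexes.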
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            _lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
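# Download the original checkpoint shards if missing, remap their weights, and save a transformers JukeboxModel.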
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
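# Rebuild an XLNet model in PyTorch from a TF checkpoint; the finetuning task selects the output head.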
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Any = XLNetConfig.from_json_file(__snake_case )
_lowerCAmelCase : Optional[Any] = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : Tuple = finetuning_task
_lowerCAmelCase : str = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Tuple = XLNetForSequenceClassification(__snake_case )
elif "squad" in finetuning_task:
_lowerCAmelCase : int = finetuning_task
_lowerCAmelCase : Union[str, Any] = XLNetForQuestionAnswering(__snake_case )
else:
_lowerCAmelCase : Dict = XLNetLMHeadModel(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
_lowerCAmelCase : Optional[int] = os.path.join(__snake_case , __snake_case )
_lowerCAmelCase : Any = os.path.join(__snake_case , __snake_case )
print(F"Save PyTorch model to {os.path.abspath(__snake_case )}" )
torch.save(model.state_dict() , __snake_case )
print(F"Save configuration file to {os.path.abspath(__snake_case )}" )
with open(__snake_case , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 707 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
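# Gabor kernel construction and an edge-detection demo applying six orientations to a grayscale image.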
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
_lowerCAmelCase : str = ksize + 1
_lowerCAmelCase : List[str] = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(_lowerCamelCase ):
for x in range(_lowerCamelCase ):
# distance from center
_lowerCAmelCase : int = x - ksize // 2
_lowerCAmelCase : Dict = y - ksize // 2
            # degrees to radians
_lowerCAmelCase : List[Any] = theta / 180 * np.pi
_lowerCAmelCase : int = np.cos(_theta )
_lowerCAmelCase : Optional[int] = np.sin(_theta )
# get kernel x
_lowerCAmelCase : int = cos_theta * px + sin_theta * py
# get kernel y
_lowerCAmelCase : str = -sin_theta * px + cos_theta * py
# fill kernel
_lowerCAmelCase : Union[str, Any] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread("../image_data/lena.jpg")
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case = out / out.max() * 255
_snake_case = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 658 | 0 |
import argparse
import os
import re
_snake_case = "src/transformers"
# Pattern that looks at the indentation in a line.
_snake_case = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_snake_case = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_snake_case = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_snake_case = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_snake_case = re.compile(R"\[([^\]]+)\]")
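# Helpers for sorting the entries of transformers' _import_structure blocks in __init__ files.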
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = _re_indent.search(UpperCAmelCase__ )
return "" if search is None else search.groups()[0]
def A ( _lowerCamelCase , _lowerCamelCase="" , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : List[str] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(UpperCAmelCase__ ):
index += 1
        _lowerCAmelCase : str = ["\n".join(lines[:index] )]
else:
_lowerCAmelCase : str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Optional[Any] = [lines[index]]
index += 1
while index < len(UpperCAmelCase__ ) and (end_prompt is None or not lines[index].startswith(UpperCAmelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(UpperCAmelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(UpperCAmelCase__ ) )
if index < len(UpperCAmelCase__ ) - 1:
_lowerCAmelCase : str = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : int = []
else:
blocks.append("\n".join(UpperCAmelCase__ ) )
_lowerCAmelCase : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(UpperCAmelCase__ ) > 0:
blocks.append("\n".join(UpperCAmelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(UpperCAmelCase__ ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def A ( _lowerCamelCase ):
'''simple docstring'''
def _inner(_lowerCamelCase ):
return key(UpperCAmelCase__ ).lower().replace("_" , "" )
return _inner
def A ( _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
def noop(_lowerCamelCase ):
return x
if key is None:
_lowerCAmelCase : int = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : Any = [obj for obj in objects if key(UpperCAmelCase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Union[str, Any] = [obj for obj in objects if key(UpperCAmelCase__ )[0].isupper() and not key(UpperCAmelCase__ ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : str = [obj for obj in objects if not key(UpperCAmelCase__ )[0].isupper()]
_lowerCAmelCase : Dict = ignore_underscore(UpperCAmelCase__ )
return sorted(UpperCAmelCase__ , key=UpperCAmelCase__ ) + sorted(UpperCAmelCase__ , key=UpperCAmelCase__ ) + sorted(UpperCAmelCase__ , key=UpperCAmelCase__ )
def A ( _lowerCamelCase ):
'''simple docstring'''
def _replace(_lowerCamelCase ):
_lowerCAmelCase : str = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
_lowerCAmelCase : Optional[int] = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(UpperCAmelCase__ )] ) + "]"
_lowerCAmelCase : Union[str, Any] = import_statement.split("\n" )
if len(UpperCAmelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Optional[int] = 2 if lines[1].strip() == """[""" else 1
_lowerCAmelCase : Tuple = [(i, _re_strip_line.search(UpperCAmelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : Dict = sort_objects(UpperCAmelCase__ , key=lambda _lowerCamelCase : x[1] )
_lowerCAmelCase : int = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(UpperCAmelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : Optional[Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Optional[Any] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : List[str] = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + """, """.join([F"\"{k}\"" for k in sort_objects(UpperCAmelCase__ )] )
return "\n".join(UpperCAmelCase__ )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : List[Any] = _re_bracket_content.sub(_replace , UpperCAmelCase__ )
return import_statement
def A ( _lowerCamelCase , _lowerCamelCase=True ):
'''simple docstring'''
with open(UpperCAmelCase__ , encoding="utf-8" ) as f:
_lowerCAmelCase : str = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Union[str, Any] = split_code_in_indented_blocks(
UpperCAmelCase__ , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(UpperCAmelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : List[str] = main_blocks[block_idx]
_lowerCAmelCase : Union[str, Any] = block.split("\n" )
# Get to the start of the imports.
_lowerCAmelCase : Union[str, Any] = 0
while line_idx < len(UpperCAmelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : int = len(UpperCAmelCase__ )
else:
line_idx += 1
if line_idx >= len(UpperCAmelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : Dict = """\n""".join(block_lines[line_idx:-1] )
_lowerCAmelCase : Optional[int] = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowerCAmelCase : List[str] = split_code_in_indented_blocks(UpperCAmelCase__ , indent_level=UpperCAmelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : str = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : str = [(pattern.search(UpperCAmelCase__ ).groups()[0] if pattern.search(UpperCAmelCase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : List[Any] = [(i, key) for i, key in enumerate(UpperCAmelCase__ ) if key is not None]
_lowerCAmelCase : List[str] = [x[0] for x in sorted(UpperCAmelCase__ , key=lambda _lowerCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : Any = 0
_lowerCAmelCase : str = []
for i in range(len(UpperCAmelCase__ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Optional[int] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(UpperCAmelCase__ )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : List[Any] = """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(UpperCAmelCase__ ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(UpperCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write("\n".join(UpperCAmelCase__ ) )
def A ( _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for root, _, files in os.walk(UpperCAmelCase__ ):
if "__init__.py" in files:
_lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(UpperCAmelCase__ , "__init__.py" ) , check_only=UpperCAmelCase__ )
if result:
_lowerCAmelCase : Dict = [os.path.join(UpperCAmelCase__ , "__init__.py" )]
if len(UpperCAmelCase__ ) > 0:
raise ValueError(F"Would overwrite {len(UpperCAmelCase__ )} files, run `make style`." )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
_snake_case = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 708 |
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
for i in range(1 , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = collection[i]
_lowerCAmelCase : str = 0
_lowerCAmelCase : Union[str, Any] = i - 1
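        # binary search the sorted prefix collection[:i] for the insertion point of the value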
while low <= high:
_lowerCAmelCase : List[str] = (low + high) // 2
if val < collection[mid]:
_lowerCAmelCase : Optional[int] = mid - 1
else:
_lowerCAmelCase : List[str] = mid + 1
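        # shift elements one position to the right and place the value at index low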
for j in range(_lowerCamelCase , _lowerCamelCase , -1 ):
_lowerCAmelCase : int = collection[j - 1]
_lowerCAmelCase : Optional[int] = val
return collection
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 658 | 0 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
_snake_case = getLogger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 8 , _lowerCamelCase = 1_024 , _lowerCamelCase="val" , _lowerCamelCase=None , _lowerCamelCase=False , _lowerCamelCase="summarization" , _lowerCamelCase=None , _lowerCamelCase=1 , _lowerCamelCase = None , _lowerCamelCase="" , **_lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = str(_lowercase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=_lowercase )
_lowerCAmelCase : str = Path(_lowercase )
_lowerCAmelCase : int = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(_lowercase )
_lowerCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(_lowercase ).cuda()
if fpaa:
_lowerCAmelCase : Any = model.half()
# determine if we need to increase num_beams
use_task_specific_params(_lowercase , _lowercase ) # update config with task specific params
_lowerCAmelCase : Optional[int] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
_lowerCAmelCase : Union[str, Any] = num_return_sequences
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(_lowercase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
_lowerCAmelCase : Union[str, Any] = tokenizer.model_max_length
if prefix is None:
_lowerCAmelCase : Optional[Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
_lowerCAmelCase : List[str] = SeqaSeqDataset(
_lowercase , _lowercase , _lowercase , max_target_length=1_024 , type_path=_lowercase , n_obs=_lowercase , prefix=_lowercase , **_lowercase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
_lowerCAmelCase : Dict = ds.make_sortish_sampler(_lowercase , distributed=_lowercase , add_extra_examples=_lowercase , shuffle=_lowercase )
_lowerCAmelCase : Optional[int] = DataLoader(_lowercase , sampler=_lowercase , batch_size=_lowercase , collate_fn=ds.collate_fn )
_lowerCAmelCase : List[Any] = []
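    # run generation batch by batch and record predictions together with their dataset ids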
for batch in tqdm(_lowercase ):
_lowerCAmelCase : List[str] = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=_lowercase , num_beams=_lowercase , **_lowercase , )
_lowerCAmelCase : Optional[Any] = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
_lowerCAmelCase : Tuple = batch["ids"]
if num_return_sequences > 1:
_lowerCAmelCase : Union[str, Any] = chunks(_lowercase , _lowercase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(_lowercase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(_lowercase , _lowercase )
return results, sampler.num_replicas
def A ( ):
'''simple docstring'''
_lowerCAmelCase : List[str] = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=_lowercase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=_lowercase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=_lowercase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=_lowercase , default=_lowercase )
parser.add_argument(
"--type_path" , type=_lowercase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=_lowercase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=_lowercase , default=8 , required=_lowercase , help="batch size" )
parser.add_argument(
"--local_rank" , type=_lowercase , default=-1 , required=_lowercase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=_lowercase , default=_lowercase , required=_lowercase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=_lowercase , default=1 , required=_lowercase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=_lowercase , default=600 , required=_lowercase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=_lowercase , default=_lowercase , required=_lowercase )
parser.add_argument("--tgt_lang" , type=_lowercase , default=_lowercase , required=_lowercase )
parser.add_argument(
"--prefix" , type=_lowercase , required=_lowercase , default=_lowercase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
_lowerCAmelCase : str = time.time()
_lowerCAmelCase : Any = parser.parse_known_args()
_lowerCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(_lowercase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
_lowerCAmelCase : str = Path(args.save_dir + "_tmp" )
Path(_lowercase ).mkdir(exist_ok=_lowercase ) # this handles locking.
_lowerCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address it later.
_lowerCAmelCase : str = {}
if args.src_lang is not None:
_lowerCAmelCase : List[Any] = args.src_lang
if args.tgt_lang is not None:
_lowerCAmelCase : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=_lowercase )
_lowerCAmelCase : Any = eval_data_dir(
args.data_dir , _lowercase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_lowercase , **_lowercase , )
if args.local_rank <= 0:
_lowerCAmelCase : List[Any] = Path(args.save_dir )
save_dir.mkdir(exist_ok=_lowercase )
_lowerCAmelCase : Union[str, Any] = gather_results_from_each_node(_lowercase , _lowercase , args.sync_timeout )
_lowerCAmelCase : str = combine_partial_results(_lowercase )
if args.num_return_sequences > 1:
_lowerCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(_lowercase , _lowercase )
return
_lowerCAmelCase : Tuple = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(_lowercase ) as f:
_lowerCAmelCase : str = [x.rstrip() for x in f.readlines()][: len(_lowercase )]
# Calculate metrics, save metrics, and save _generations.txt
_lowerCAmelCase : Any = "translation" in args.task
_lowerCAmelCase : Tuple = calculate_bleu if calc_bleu else calculate_rouge
_lowerCAmelCase : Tuple = "bleu" if calc_bleu else "rouge"
_lowerCAmelCase : Dict = score_fn(_lowercase , _lowercase )
_lowerCAmelCase : Dict = len(_lowercase )
_lowerCAmelCase : Optional[int] = time.time() - start_time
_lowerCAmelCase : Tuple = round(runtime / metrics["n_obs"] , 4 )
_lowerCAmelCase : Optional[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
_lowerCAmelCase : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(_lowercase , _lowercase , indent=_lowercase )
print(_lowercase )
write_txt_file(_lowercase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(_lowercase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(_lowercase )
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = []
for partial_result in partial_results:
records.extend(_lowercase )
_lowerCAmelCase : str = sorted(_lowercase , key=lambda _lowerCamelCase : x["id"] )
_lowerCAmelCase : List[Any] = [x["pred"] for x in records]
return preds
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = time.time()
logger.info("waiting for all nodes to finish" )
_lowerCAmelCase : str = None
while (time.time() - start_wait) < timeout:
_lowerCAmelCase : int = list(save_dir.glob("rank_*.json" ) )
if len(_lowercase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
_lowerCAmelCase : Tuple = lmap(_lowercase , _lowercase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
| 658 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class UpperCAmelCase_ ( UpperCAmelCase__):
lowerCamelCase__ = 'falcon'
lowerCamelCase__ = ['past_key_values']
def __init__( self, __a=6_5024, __a=4544, __a=32, __a=71, __a=1E-5, __a=0.02, __a=True, __a=0.0, __a=0.0, __a=None, __a=False, __a=False, __a=True, __a=True, __a=False, __a=11, __a=11, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
# Backward compatibility with n_embed kwarg
_lowerCAmelCase : Dict = kwargs.pop("n_embed", __lowerCAmelCase)
_lowerCAmelCase : Any = hidden_size if n_embed is None else n_embed
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : int = num_attention_heads
_lowerCAmelCase : List[Any] = layer_norm_epsilon
_lowerCAmelCase : Optional[int] = initializer_range
_lowerCAmelCase : Dict = use_cache
_lowerCAmelCase : int = hidden_dropout
_lowerCAmelCase : Any = attention_dropout
_lowerCAmelCase : Dict = bos_token_id
_lowerCAmelCase : Union[str, Any] = eos_token_id
_lowerCAmelCase : Dict = num_attention_heads if num_kv_heads is None else num_kv_heads
_lowerCAmelCase : Tuple = alibi
_lowerCAmelCase : Dict = new_decoder_architecture
_lowerCAmelCase : List[str] = multi_query # Ignored when new_decoder_architecture is True
_lowerCAmelCase : int = parallel_attn
_lowerCAmelCase : Optional[int] = bias
super().__init__(bos_token_id=__lowerCAmelCase, eos_token_id=__lowerCAmelCase, **__lowerCAmelCase)
@property
def snake_case__ ( self):
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def snake_case__ ( self):
'''simple docstring'''
return not self.alibi
| 710 |
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def count_of_possible_combinations(_lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
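    # top-down DP: memoize the count for each remaining target value in dp_array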
def count_of_possible_combinations_with_dp_array(
_lowerCamelCase , _lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase : Optional[int] = sum(
count_of_possible_combinations_with_dp_array(target - item , _lowerCamelCase )
for item in array )
_lowerCAmelCase : Any = answer
return answer
_lowerCAmelCase : List[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
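    # bottom-up DP: dp_array[i] is the number of ordered combinations of array items summing to i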
_lowerCAmelCase : List[Any] = [0] * (target + 1)
_lowerCAmelCase : List[str] = 1
for i in range(1 , target + 1 ):
for j in range(_lowerCamelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 658 | 0 |
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = int(_snake_case )
if n_element < 1:
        _lowerCAmelCase : str = ValueError("n_element should be a positive number" )
raise my_error
_lowerCAmelCase : str = [1]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = (0, 0, 0)
_lowerCAmelCase : Dict = 1
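    # i, j, k index the entries whose multiples by 2, 3 and 5 form the next candidates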
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
_snake_case = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
_snake_case = hamming(int(n))
print("-----------------------------------------------------")
print(f'''The list with nth numbers is: {hamming_numbers}''')
print("-----------------------------------------------------")
| 711 |
import string
def A ( _lowerCamelCase ):
'''simple docstring'''
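    # brute force: try all 26 shifts and print every candidate decryption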
for key in range(len(string.ascii_uppercase ) ):
_lowerCAmelCase : str = ""
for symbol in message:
if symbol in string.ascii_uppercase:
_lowerCAmelCase : List[str] = string.ascii_uppercase.find(_lowerCamelCase )
_lowerCAmelCase : Dict = num - key
if num < 0:
_lowerCAmelCase : Dict = num + len(string.ascii_uppercase )
_lowerCAmelCase : Optional[Any] = translated + string.ascii_uppercase[num]
else:
_lowerCAmelCase : int = translated + symbol
print(F"Decryption using Key #{key}: {translated}" )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = input("Encrypted message: " )
_lowerCAmelCase : Dict = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 658 | 0 |
def A ( _lowerCamelCase ): # noqa: E741
'''simple docstring'''
_lowerCAmelCase : str = len(_lowercase )
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Any = [0] * n
_lowerCAmelCase : Dict = [False] * n
_lowerCAmelCase : Optional[Any] = [False] * n
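    # recursive DFS (Tarjan): low[v] is the lowest discovery index reachable from v's subtree;
    # a root is an articulation point only when it has more than one outgoing DFS edge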
def dfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if parent == root:
out_edge_count += 1
_lowerCAmelCase : int = True
_lowerCAmelCase : Optional[Any] = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCAmelCase : Optional[int] = dfs(_lowercase , _lowercase , _lowercase , _lowercase )
_lowerCAmelCase : Dict = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
_lowerCAmelCase : str = True
# AP found via cycle
if at == low[to]:
_lowerCAmelCase : int = True
else:
_lowerCAmelCase : List[str] = min(low[at] , _lowercase )
return out_edge_count
for i in range(_lowercase ):
if not visited[i]:
_lowerCAmelCase : str = 0
_lowerCAmelCase : Dict = dfs(_lowercase , _lowercase , -1 , _lowercase )
_lowerCAmelCase : str = out_edge_count > 1
for x in range(len(_lowercase ) ):
if is_art[x] is True:
print(_lowercase )
# Adjacency list of graph
_snake_case = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 712 |
import requests
from bsa import BeautifulSoup
def A ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
_lowerCAmelCase : str = soup.findAll("h1" )
_lowerCAmelCase : Optional[int] = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 0 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=False, __a=True, __a=99, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : List[str] = seq_length
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Tuple = use_input_mask
_lowerCAmelCase : List[str] = use_token_type_ids
_lowerCAmelCase : Optional[int] = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Tuple = type_vocab_size
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = num_labels
_lowerCAmelCase : str = num_choices
_lowerCAmelCase : Optional[Any] = scope
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : List[str] = None
if self.use_input_mask:
_lowerCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Tuple = None
if self.use_labels:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_SCREAMING_SNAKE_CASE, initializer_range=self.initializer_range, )
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = BioGptModel(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
_lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE, attention_mask=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = BioGptForCausalLM(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
_lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE, attention_mask=_SCREAMING_SNAKE_CASE, token_type_ids=_SCREAMING_SNAKE_CASE, labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, *__a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = BioGptModel(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
# create attention mask
_lowerCAmelCase : int = torch.ones(input_ids.shape, dtype=torch.long, device=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : int = self.seq_length // 2
_lowerCAmelCase : int = 0
# first forward pass
_lowerCAmelCase , _lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE, attention_mask=_SCREAMING_SNAKE_CASE).to_tuple()
        # create hypothetical next token and extend to next_input_ids
_lowerCAmelCase : Dict = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
_lowerCAmelCase : Optional[Any] = ids_tensor((1,), _SCREAMING_SNAKE_CASE).item() + 1
_lowerCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
_lowerCAmelCase : Optional[int] = random_other_next_tokens
# append to next input_ids and attn_mask
_lowerCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens], dim=-1)
_lowerCAmelCase : Tuple = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=_SCREAMING_SNAKE_CASE)], dim=1, )
# get two different outputs
_lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE, attention_mask=_SCREAMING_SNAKE_CASE)["last_hidden_state"]
_lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE, past_key_values=_SCREAMING_SNAKE_CASE, attention_mask=_SCREAMING_SNAKE_CASE)["last_hidden_state"]
# select random slice
_lowerCAmelCase : Tuple = ids_tensor((1,), output_from_past.shape[-1]).item()
_lowerCAmelCase : int = output_from_no_past[:, -1, random_slice_idx].detach()
_lowerCAmelCase : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, atol=1E-3))
def snake_case__ ( self, __a, __a, __a, __a, __a, *__a):
'''simple docstring'''
_lowerCAmelCase : str = BioGptModel(config=_SCREAMING_SNAKE_CASE).to(_SCREAMING_SNAKE_CASE).eval()
_lowerCAmelCase : Union[str, Any] = torch.ones(input_ids.shape, dtype=torch.long, device=_SCREAMING_SNAKE_CASE)
# first forward pass
_lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE, attention_mask=_SCREAMING_SNAKE_CASE, use_cache=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
_lowerCAmelCase : List[Any] = ids_tensor((self.batch_size, 3), config.vocab_size)
_lowerCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and attn_mask
_lowerCAmelCase : Tuple = torch.cat([input_ids, next_tokens], dim=-1)
_lowerCAmelCase : str = torch.cat([attention_mask, next_attn_mask], dim=-1)
_lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE, attention_mask=_SCREAMING_SNAKE_CASE)["last_hidden_state"]
_lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE, attention_mask=_SCREAMING_SNAKE_CASE, past_key_values=_SCREAMING_SNAKE_CASE)[
"last_hidden_state"
]
# select random slice
_lowerCAmelCase : List[str] = ids_tensor((1,), output_from_past.shape[-1]).item()
_lowerCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCAmelCase : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, atol=1E-3))
def snake_case__ ( self, __a, __a, __a, __a, __a, *__a, __a=False):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = BioGptForCausalLM(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE, labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def snake_case__ ( self, __a, *__a):
'''simple docstring'''
_lowerCAmelCase : Any = BioGptModel(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def snake_case__ ( self, __a, __a, __a, __a, __a, *__a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_labels
_lowerCAmelCase : Optional[int] = BioGptForTokenClassification(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
_lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE, attention_mask=_SCREAMING_SNAKE_CASE, token_type_ids=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , a , unittest.TestCase):
lowerCamelCase__ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCamelCase__ = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = BioGptModelTester(self)
_lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=_SCREAMING_SNAKE_CASE, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : Tuple = type
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_SCREAMING_SNAKE_CASE)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_SCREAMING_SNAKE_CASE, gradient_checkpointing=_SCREAMING_SNAKE_CASE)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_SCREAMING_SNAKE_CASE)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_SCREAMING_SNAKE_CASE)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_SCREAMING_SNAKE_CASE)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : str = BioGptTokenizer.from_pretrained("microsoft/biogpt")
_lowerCAmelCase : str = "left"
# Define PAD Token = EOS Token = 50256
_lowerCAmelCase : Tuple = tokenizer.eos_token
_lowerCAmelCase : Optional[int] = model.config.eos_token_id
# use different length sentences to test batching
_lowerCAmelCase : Optional[int] = [
"Hello, my dog is a little",
"Today, I",
]
_lowerCAmelCase : int = tokenizer(_SCREAMING_SNAKE_CASE, return_tensors="pt", padding=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Optional[int] = inputs["input_ids"].to(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : int = model.generate(
input_ids=_SCREAMING_SNAKE_CASE, attention_mask=inputs["attention_mask"].to(_SCREAMING_SNAKE_CASE), )
_lowerCAmelCase : List[str] = tokenizer(sentences[0], return_tensors="pt").input_ids.to(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : str = model.generate(input_ids=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : List[str] = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
_lowerCAmelCase : Optional[Any] = tokenizer(sentences[1], return_tensors="pt").input_ids.to(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Dict = model.generate(input_ids=_SCREAMING_SNAKE_CASE, max_length=model.config.max_length - num_paddings)
_lowerCAmelCase : int = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE, skip_special_tokens=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0], skip_special_tokens=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Tuple = tokenizer.decode(output_padded[0], skip_special_tokens=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Tuple = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
self.assertListEqual(_SCREAMING_SNAKE_CASE, [non_padded_sentence, padded_sentence])
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = BioGptModel.from_pretrained(_SCREAMING_SNAKE_CASE)
self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = 3
_lowerCAmelCase : List[str] = input_dict["input_ids"]
_lowerCAmelCase : int = input_ids.ne(1).to(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Any = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
_lowerCAmelCase : Dict = BioGptForSequenceClassification(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
_lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE, attention_mask=_SCREAMING_SNAKE_CASE, labels=_SCREAMING_SNAKE_CASE)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Tuple = "multi_label_classification"
_lowerCAmelCase : List[Any] = input_dict["input_ids"]
_lowerCAmelCase : str = input_ids.ne(1).to(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
_lowerCAmelCase : Dict = BioGptForSequenceClassification(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
_lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE, attention_mask=_SCREAMING_SNAKE_CASE, labels=_SCREAMING_SNAKE_CASE)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
_lowerCAmelCase : Tuple = torch.tensor([[2, 4805, 9, 656, 21]])
_lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE)[0]
_lowerCAmelCase : Optional[Any] = 4_2384
_lowerCAmelCase : Optional[Any] = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape, _SCREAMING_SNAKE_CASE)
_lowerCAmelCase : List[str] = torch.tensor(
[[[-9.5_236, -9.8_918, 10.4557], [-11.0469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]])
self.assertTrue(torch.allclose(output[:, :3, :3], _SCREAMING_SNAKE_CASE, atol=1E-4))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = BioGptTokenizer.from_pretrained("microsoft/biogpt")
_lowerCAmelCase : Tuple = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(_SCREAMING_SNAKE_CASE)
torch.manual_seed(0)
_lowerCAmelCase : Any = tokenizer("COVID-19 is", return_tensors="pt").to(_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : List[Any] = model.generate(
**_SCREAMING_SNAKE_CASE, min_length=100, max_length=1024, num_beams=5, early_stopping=_SCREAMING_SNAKE_CASE, )
_lowerCAmelCase : int = tokenizer.decode(output_ids[0], skip_special_tokens=_SCREAMING_SNAKE_CASE)
_lowerCAmelCase : Any = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
| 713 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
def __init__( self, __a, __a):
'''simple docstring'''
if len(__a) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1.")
_lowerCAmelCase : list[float] = list(__a)
_lowerCAmelCase : Any = degree
def __add__( self, __a):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_lowerCAmelCase : Dict = self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree, __a)
else:
_lowerCAmelCase : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree, __a)
def __sub__( self, __a):
'''simple docstring'''
return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self):
'''simple docstring'''
return Polynomial(self.degree, [-c for c in self.coefficients])
def __mul__( self, __a):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(__a)
return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * self.degree
for i in range(self.degree):
_lowerCAmelCase : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, __a)
def snake_case__ ( self, __a = 0):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + 2)
_lowerCAmelCase : Optional[Any] = constant
for i in range(self.degree + 1):
_lowerCAmelCase : Dict = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, __a)
def __eq__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self, __a):
'''simple docstring'''
return not self.__eq__(__a)
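# Editor-added usage sketch (an assumption, not part of the original file).
# It only exercises the operator overloads above, because the duplicated
# `__a` parameters and the repeated `snake_case__` method names mean the
# class cannot run as written; the constructor is assumed to take
# (degree, coefficients).
if __name__ == "__main__":
    p = Polynomial(2, [1, 0, 3])  # represents 3x^2 + 1
    q = Polynomial(1, [0, 2])  # represents 2x
    print(p + q)  # 3x^2 + 2x + 1
    print(p - q)  # 3x^2 - 2x + 1
    print(p * q)  # 6x^3 + 2x
    print(p == Polynomial(2, [1, 0, 3]))  # True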
| 658 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_snake_case = 16
_snake_case = 32
def A ( _lowerCamelCase , _lowerCamelCase = 16 , _lowerCamelCase = "bert-base-cased" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Any = load_dataset("glue" , "mrpc" )
def tokenize_function(_lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase : Tuple = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCAmelCase : List[str] = datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=_lowerCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase : Dict = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCamelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowerCamelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_lowerCAmelCase : Dict = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
_lowerCAmelCase : Tuple = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
return train_dataloader, eval_dataloader
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
model.eval()
_lowerCAmelCase : str = 0
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase : int = model(**_lowerCamelCase )
_lowerCAmelCase : List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_lowerCAmelCase : int = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Optional[int] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowerCAmelCase : str = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowerCamelCase , references=_lowerCamelCase , )
_lowerCAmelCase : str = metric.compute()
return eval_metric["accuracy"]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : Optional[int] = config["""lr"""]
_lowerCAmelCase : Tuple = int(config["num_epochs"] )
_lowerCAmelCase : Any = int(config["seed"] )
_lowerCAmelCase : Tuple = int(config["batch_size"] )
_lowerCAmelCase : int = args.model_name_or_path
set_seed(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = get_dataloaders(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCamelCase , return_dict=_lowerCamelCase )
# Instantiate optimizer
_lowerCAmelCase : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowerCAmelCase : Tuple = optimizer_cls(params=model.parameters() , lr=_lowerCamelCase )
if accelerator.state.deepspeed_plugin is not None:
_lowerCAmelCase : Dict = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : str = (len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowerCAmelCase : Any = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase , num_warmup_steps=0 , num_training_steps=_lowerCamelCase , )
else:
_lowerCAmelCase : Any = DummyScheduler(_lowerCamelCase , total_num_steps=_lowerCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase : str = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# We need to keep track of how many total steps we have iterated over
_lowerCAmelCase : str = 0
    # We also need to keep track of the starting epoch so files are named properly
_lowerCAmelCase : int = 0
_lowerCAmelCase : int = evaluate.load("glue" , "mrpc" )
_lowerCAmelCase : List[Any] = num_epochs
if args.partial_train_epoch is not None:
_lowerCAmelCase : int = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_lowerCAmelCase : int = args.resume_from_checkpoint.split("epoch_" )[1]
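        # recover the numeric epoch from a checkpoint folder name such as "epoch_3"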
_lowerCAmelCase : Optional[int] = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_lowerCAmelCase : int = int(_lowerCamelCase ) + 1
_lowerCAmelCase : Dict = evaluation_loop(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
accelerator.print("resumed checkpoint performance:" , _lowerCamelCase )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F"state_{starting_epoch-1}.json" ) , "r" ) as f:
_lowerCAmelCase : List[Any] = json.load(_lowerCamelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_lowerCAmelCase : Dict = {}
for epoch in range(_lowerCamelCase , _lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = model(**_lowerCamelCase )
_lowerCAmelCase : str = outputs.loss
_lowerCAmelCase : List[str] = loss / gradient_accumulation_steps
accelerator.backward(_lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_lowerCAmelCase : int = F"epoch_{epoch}"
_lowerCAmelCase : Optional[int] = os.path.join(args.output_dir , _lowerCamelCase )
accelerator.save_state(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = evaluation_loop(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = accuracy
_lowerCAmelCase : str = lr_scheduler.get_lr()[0]
_lowerCAmelCase : List[str] = optimizer.param_groups[0]["""lr"""]
_lowerCAmelCase : Union[str, Any] = epoch
_lowerCAmelCase : str = overall_step
accelerator.print(F"epoch {epoch}:" , _lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"state_{epoch}.json" ) , "w" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=_lowerCamelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_lowerCamelCase , )
parser.add_argument(
"--output_dir" , type=_lowerCamelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=_lowerCamelCase , default=_lowerCamelCase , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=_lowerCamelCase , default=_lowerCamelCase , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=_lowerCamelCase , default=2 , help="Number of train epochs." , )
_lowerCAmelCase : List[str] = parser.parse_args()
_lowerCAmelCase : List[str] = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
| 714 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 658 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
_snake_case = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
_snake_case = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_0000):
out_file.write(data)
_snake_case = BeautifulSoup(res.text, "html.parser")
_snake_case = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'''https://google.com{link.get("href")}''')
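# Illustrative invocation (the filename is a placeholder for wherever this script lives);
# all command-line words become the search query:
#   python crawl_google_results.py hugging face transformers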
| 715 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with the given tax rate applied."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
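# Expected output (plain arithmetic; floating-point repr may show extra digits):
#   price_plus_tax(100, 0.25) = 125.0
#   price_plus_tax(125.50, 0.05) = 131.775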
| 658 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
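# Usage sketch (illustrative only; assumes hub access for the pretrained files):
#   tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#   input_ids = tokenizer("Hello world")["input_ids"]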
| 716 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 658 | 0 |
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
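# Hedged sampling-loop sketch; `score_model` and the tensor shape are placeholders,
# not part of this file:
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(1000)
#   x = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)  # hypothetical score network
#       x, x_mean = scheduler.step_pred(score, x, t)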
| 717 |
import base64


def ascii85_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Ascii85 bytes."""
    return base64.a85encode(string.encode("utf-8"))


def ascii85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back into a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
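# Round-trip example (values verified with `base64.a85encode`):
#   ascii85_encode("hello")     # -> b'BOu!rDZ'
#   ascii85_decode(b"BOu!rDZ")  # -> 'hello'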
| 658 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_snake_case = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Build train/valid loaders over synthetic data drawn from y = a * x + b + noise."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Train for `num_epochs` and return the random draws made along the way."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
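# `train` is exercised later in this file, e.g. in the `__main__` block:
#   train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# The returned random draws are compared before and after `load_state` to check
# that RNG state is restored along with model and optimizer state.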
class DummyModel(nn.Module):
    """Simple model mapping x to a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
_snake_case = "/tmp/accelerate/state_checkpointing"
_snake_case = DummyModel()
_snake_case = torch.optim.Adam(params=model.parameters(), lr=1e-3)
_snake_case = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_snake_case, _snake_case = dummy_dataloaders()
_snake_case = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_snake_case = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_snake_case, _snake_case = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_snake_case = group["params"][0].device
break
assert param_device.type == accelerator.device.type
_snake_case = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
_snake_case = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
_snake_case = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
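# Hedged sketch of driving the ONNX config above (the class names are from this
# file; the calls are illustrative):
#   config = Data2VecVisionConfig()
#   onnx_config = Data2VecVisionOnnxConfig(config)
#   print(onnx_config.inputs)               # pixel_values axes
#   print(onnx_config.atol_for_validation)  # 1e-4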
| 658 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
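# Illustrative check of the dynamic axes exposed for ONNX export (sketch only;
# the `task` keyword follows the upstream OnnxConfig constructor):
#   onnx_config = AlbertOnnxConfig(AlbertConfig(), task="multiple-choice")
#   assert onnx_config.inputs["input_ids"][1] == "choice"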
| 719 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
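# Example invocation (all paths are placeholders):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pt \
#       --finetuning_task sts-b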
| 658 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """Wraps the CLIP processor so images stay torch tensors and gradients can flow through preprocessing."""

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector to the latent and decode the shifted latent back to an image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 720 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 658 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
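# Usage sketch (illustrative; assumes hub access):
#   tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tokenizer.build_inputs_with_special_tokens([7592, 2088])  # [CLS] ... [SEP]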
| 721 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
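# Hedged example wiring these helpers together (`x` is a preprocessed image batch;
# the paths fall back to the defaults hard-coded above):
#   config = load_config("./model_checkpoints/vqgan_only.yaml", display=True)
#   model = load_vqgan("cuda", conf_path=None, ckpt_path=None)
#   xrec = reconstruct_with_vqgan(x, model)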
| 658 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
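# Illustrative: a labels file containing "angry\nhappy\n" maps to {0: "angry", 1: "happy"},
# which is the shape `config.id2label` expects further down.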
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
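    # type_id 0 addresses the conv weight/bias of the indexed extractor layer;
    # type_id 2 addresses its norm, which exists on every layer for layer-norm
    # extractors but only on layer 0 when group norm is used.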
_lowerCAmelCase : Dict = full_name.split("conv_layers." )[-1]
_lowerCAmelCase : Dict = name.split("." )
_lowerCAmelCase : str = int(items[0] )
_lowerCAmelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_lowerCAmelCase : List[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_lowerCAmelCase : Dict = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
_lowerCAmelCase : Dict = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
_lowerCAmelCase : List[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=False ):
'''simple docstring'''
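    # Three conversion modes: sequence classification (labels read from a txt
    # file), fine-tuned CTC (vocab/tokenizer/processor rebuilt from the fairseq
    # Dictionary), or a plain pretraining checkpoint.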
if config_path is not None:
_lowerCAmelCase : str = WavaVecaConfig.from_pretrained(_lowerCamelCase )
else:
_lowerCAmelCase : Tuple = WavaVecaConfig()
if is_seq_class:
_lowerCAmelCase : List[str] = read_txt_into_dict(_lowerCamelCase )
_lowerCAmelCase : str = idalabel
_lowerCAmelCase : Union[str, Any] = WavaVecaForSequenceClassification(_lowerCamelCase )
_lowerCAmelCase : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
feature_extractor.save_pretrained(_lowerCamelCase )
elif is_finetuned:
if dict_path:
_lowerCAmelCase : int = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase : Dict = target_dict.pad_index
_lowerCAmelCase : List[str] = target_dict.bos_index
_lowerCAmelCase : Optional[Any] = target_dict.eos_index
_lowerCAmelCase : str = len(target_dict.symbols )
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Tuple = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
_lowerCAmelCase : int = True if config.feat_extract_norm == "layer" else False
_lowerCAmelCase : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
_lowerCAmelCase : Optional[Any] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = WavaVecaForCTC(_lowerCamelCase )
else:
_lowerCAmelCase : int = WavaVecaForPreTraining(_lowerCamelCase )
if is_finetuned or is_seq_class:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_lowerCAmelCase : Tuple = argparse.Namespace(task="audio_pretraining" )
_lowerCAmelCase : str = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
_snake_case = parser.parse_args()
_snake_case = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 700 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
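        # Standard BERT-style hyperparameters plus RoCBert's extras: pronunciation
        # and shape embeddings (their sizes and vocab sizes, whether they are
        # enabled, and whether they are concatenated with the token embeddings).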
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
| 658 | 0 |
import unittest
from knapsack import greedy_knapsack as kp
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = [10, 20, 30, 40, 50, 60]
_lowerCAmelCase : Dict = [2, 4, 6, 8, 10, 12]
_lowerCAmelCase : List[str] = 100
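        # All six items fit (total weight 42 <= 100), so the greedy result is the
        # full profit sum of 210.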
self.assertEqual(kp.calc_profit(__a, __a, __a), 210)
def snake_case__ ( self):
'''simple docstring'''
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")
def snake_case__ ( self):
'''simple docstring'''
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")
def snake_case__ ( self):
'''simple docstring'''
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")
def snake_case__ ( self):
'''simple docstring'''
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")
def snake_case__ ( self):
'''simple docstring'''
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 701 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
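    # e.g. [1, 2, 3] -> 2.0; an empty list raises instead of dividing by zero.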
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
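        # A small public Bark checkpoint plus a scratch directory and fixed
        # prompt/voice-preset names shared by the tests below.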
_lowerCAmelCase : Union[str, Any] = "ylacombe/bark-small"
_lowerCAmelCase : List[str] = tempfile.mkdtemp()
_lowerCAmelCase : Optional[int] = "en_speaker_1"
_lowerCAmelCase : List[str] = "This is a test string"
_lowerCAmelCase : int = "speaker_embeddings_path.json"
_lowerCAmelCase : Union[str, Any] = "speaker_embeddings"
def snake_case__ ( self, **__a):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint, **__UpperCamelCase)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
_lowerCAmelCase : int = BarkProcessor(tokenizer=__UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
processor.save_pretrained(
self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, )
_lowerCAmelCase : str = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)", )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
_lowerCAmelCase : int = 35
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : Optional[Any] = 8
_lowerCAmelCase : Optional[int] = {
"semantic_prompt": np.ones(__UpperCamelCase),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
"fine_prompt": np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
_lowerCAmelCase : str = processor(text=self.input_string, voice_preset=__UpperCamelCase)
_lowerCAmelCase : Dict = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(__UpperCamelCase, np.array([])).tolist())
# test loading voice preset from npz file
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname, "file.npz")
np.savez(__UpperCamelCase, **__UpperCamelCase)
_lowerCAmelCase : List[Any] = processor(text=self.input_string, voice_preset=__UpperCamelCase)
_lowerCAmelCase : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(__UpperCamelCase, np.array([])).tolist())
# test loading voice preset from the hub
_lowerCAmelCase : Optional[int] = processor(text=self.input_string, voice_preset=self.voice_preset)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = BarkProcessor(tokenizer=__UpperCamelCase)
_lowerCAmelCase : Tuple = processor(text=self.input_string)
_lowerCAmelCase : Tuple = tokenizer(
self.input_string, padding="max_length", max_length=256, add_special_tokens=__UpperCamelCase, return_attention_mask=__UpperCamelCase, return_token_type_ids=__UpperCamelCase, )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 702 |
def A ( _lowerCamelCase ):
'''simple docstring'''
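    # The n-th hexagonal number is n * (2n - 1); counting starts at n = 0, so
    # the first five values are [0, 1, 6, 15, 28].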
if length <= 0 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 0 |
import os
from pathlib import Path
def A ( ):
'''simple docstring'''
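    # JIT-compile the MultiScaleDeformableAttention CPU/CUDA kernels shipped in
    # the repo's kernels/ folder via torch.utils.cpp_extension.load and return
    # the resulting extension module.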
from torch.utils.cpp_extension import load
_lowerCAmelCase : Tuple = Path(lowerCAmelCase_ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
_lowerCAmelCase : Optional[Any] = [
root / filename
for filename in [
'''vision.cpp''',
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , lowerCAmelCase_ , with_cuda=lowerCAmelCase_ , extra_include_paths=[str(lowerCAmelCase_ )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 703 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( _lowerCamelCase ):
'''simple docstring'''
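    # Reshape a flat array into a single column vector.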
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
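    # Within-class scatter: center each class's samples on its own mean,
    # accumulate the outer products, and normalize by the total sample count.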
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
# Centralize the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
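    # Between-class scatter: outer products of (class mean - global mean),
    # weighted by the number of samples in each class.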
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
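    # PCA: center the data, eigendecompose the covariance (np.linalg.eigh
    # returns eigenvalues in ascending order, hence the ::-1 reversal), keep
    # the top `dimensions` eigenvectors, and project.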
if features.any():
_lowerCAmelCase : List[Any] = features.mean(1 )
# Center the dataset
_lowerCAmelCase : List[Any] = features - np.reshape(_lowerCamelCase , (data_mean.size, 1) )
_lowerCAmelCase : Optional[Any] = np.dot(_lowerCamelCase , centered_data.T ) / features.shape[1]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = np.linalg.eigh(_lowerCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCAmelCase : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCAmelCase : List[Any] = np.dot(filtered_eigenvectors.T , _lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
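    # LDA: solve the generalized eigenproblem S_b v = lambda * S_w v with
    # scipy's eigh, keep the leading eigenvectors, orthonormalize them via SVD,
    # and project the data onto that basis.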
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
_lowerCAmelCase , _lowerCAmelCase : List[str] = eigh(
covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
_lowerCAmelCase : List[str] = eigenvectors[:, ::-1][:, :dimensions]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = np.linalg.svd(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = svd_matrix[:, 0:dimensions]
_lowerCAmelCase : str = np.dot(filtered_svd_matrix.T , _lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
_lowerCAmelCase : Union[str, Any] = linear_discriminant_analysis(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError) as error_info:
_lowerCAmelCase : Tuple = principal_component_analysis(_lowerCamelCase , _lowerCamelCase )
if not np.allclose(_lowerCamelCase , _lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase_ ( _UpperCAmelCase , unittest.TestCase):
lowerCamelCase__ = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def snake_case__ ( self, __a=0):
'''simple docstring'''
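        # A seeded RandomState keeps the tiny pipeline deterministic so the
        # hard-coded output slices in the tests below stay valid.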
_lowerCAmelCase : List[str] = np.random.RandomState(lowercase__)
_lowerCAmelCase : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=lowercase__)
_lowerCAmelCase : Optional[Any] = self.get_dummy_inputs()
_lowerCAmelCase : Dict = pipe(**lowercase__).images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Union[str, Any] = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : Union[str, Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowercase__)
pipe.set_progress_bar_config(disable=lowercase__)
_lowerCAmelCase : Optional[Any] = self.get_dummy_inputs()
_lowerCAmelCase : str = pipe(**lowercase__).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Tuple = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase__)
_lowerCAmelCase : int = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**lowercase__).images
_lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Dict = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase__)
_lowerCAmelCase : Optional[Any] = self.get_dummy_inputs()
_lowerCAmelCase : Optional[Any] = pipe(**lowercase__).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Tuple = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase__)
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs()
_lowerCAmelCase : Optional[Any] = pipe(**lowercase__).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : str = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase__)
_lowerCAmelCase : int = self.get_dummy_inputs()
_lowerCAmelCase : Optional[int] = pipe(**lowercase__).images
_lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Tuple = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=lowercase__)
_lowerCAmelCase : Tuple = self.get_dummy_inputs()
_lowerCAmelCase : List[Any] = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase : Optional[Any] = pipe(**lowercase__)
_lowerCAmelCase : Optional[Any] = output.images[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = self.get_dummy_inputs()
_lowerCAmelCase : Optional[int] = 3 * [inputs.pop("prompt")]
_lowerCAmelCase : str = pipe.tokenizer(
lowercase__, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=lowercase__, return_tensors="np", )
_lowerCAmelCase : Any = text_inputs["""input_ids"""]
_lowerCAmelCase : Dict = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
_lowerCAmelCase : Optional[int] = prompt_embeds
# forward
_lowerCAmelCase : Optional[int] = pipe(**lowercase__)
_lowerCAmelCase : int = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=lowercase__)
_lowerCAmelCase : Tuple = self.get_dummy_inputs()
_lowerCAmelCase : List[Any] = 3 * ["""this is a negative prompt"""]
_lowerCAmelCase : Any = negative_prompt
_lowerCAmelCase : Dict = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase : Optional[int] = pipe(**lowercase__)
_lowerCAmelCase : List[Any] = output.images[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = self.get_dummy_inputs()
_lowerCAmelCase : List[Any] = 3 * [inputs.pop("prompt")]
_lowerCAmelCase : List[Any] = []
for p in [prompt, negative_prompt]:
_lowerCAmelCase : str = pipe.tokenizer(
lowercase__, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=lowercase__, return_tensors="np", )
_lowerCAmelCase : Any = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
_lowerCAmelCase : str = embeds
# forward
_lowerCAmelCase : str = pipe(**lowercase__)
_lowerCAmelCase : int = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
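        # ONNX Runtime CUDA execution-provider options: cap the GPU memory
        # arena at ~15 GB and extend it only by the requested amount.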
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = ort.SessionOptions()
_lowerCAmelCase : Any = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=lowercase__, feature_extractor=lowercase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowercase__)
_lowerCAmelCase : Any = """A painting of a squirrel eating a burger"""
np.random.seed(0)
_lowerCAmelCase : Dict = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
_lowerCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lowercase__, safety_checker=lowercase__, feature_extractor=lowercase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowercase__)
_lowerCAmelCase : List[Any] = """open neural network exchange"""
_lowerCAmelCase : str = np.random.RandomState(0)
_lowerCAmelCase : Dict = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowercase__, output_type="np")
_lowerCAmelCase : int = output.images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : str = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
_lowerCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lowercase__, safety_checker=lowercase__, feature_extractor=lowercase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowercase__)
_lowerCAmelCase : Dict = """open neural network exchange"""
_lowerCAmelCase : Any = np.random.RandomState(0)
_lowerCAmelCase : str = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowercase__, output_type="np")
_lowerCAmelCase : Union[str, Any] = output.images
_lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Optional[int] = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = 0
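        # The nested callback checks the latents against reference slices at
        # steps 0 and 5 and counts how often it is invoked.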
def test_callback_fn(__a, __a, __a) -> None:
_lowerCAmelCase : Optional[int] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_lowerCAmelCase : str = latents[0, -3:, -3:, -1]
_lowerCAmelCase : str = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_lowerCAmelCase : Union[str, Any] = latents[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[Any] = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=lowercase__, feature_extractor=lowercase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowercase__)
_lowerCAmelCase : Optional[int] = """Andromeda galaxy in a bottle"""
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(0)
pipe(
prompt=lowercase__, num_inference_steps=5, guidance_scale=7.5, generator=lowercase__, callback=lowercase__, callback_steps=1, )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=lowercase__, feature_extractor=lowercase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
assert isinstance(lowercase__, lowercase__)
assert pipe.safety_checker is None
_lowerCAmelCase : int = pipe("example prompt", num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase__)
_lowerCAmelCase : int = OnnxStableDiffusionPipeline.from_pretrained(lowercase__)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowerCAmelCase : Optional[Any] = pipe("example prompt", num_inference_steps=2).images[0]
assert image is not None
| 704 |
import requests
from bsa import BeautifulSoup
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
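    # Parse the first result card (div.gs_ri) and return the text of the third
    # footer anchor, which on Google Scholar is typically the "Cited by N" link.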
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , "html.parser" )
_lowerCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_lowerCAmelCase : str = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_snake_case = "pt"
elif is_tf_available():
_snake_case = "tf"
else:
_snake_case = "jax"
class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
lowerCamelCase__ = ByTaTokenizer
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : Optional[Any] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("google/byt5-small")
def snake_case__ ( self, **__a):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **__snake_case)
def snake_case__ ( self, __a, __a=False, __a=20, __a=5):
'''simple docstring'''
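        # Collect byte ids whose single tokens decode to plain ASCII letters or
        # spaces and round-trip through encode(), then build a matching
        # (text, ids) pair within the requested length bounds.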
_lowerCAmelCase : Optional[int] = []
for i in range(len(__snake_case)):
try:
_lowerCAmelCase : List[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=__snake_case)
except UnicodeDecodeError:
pass
toks.append((i, tok))
_lowerCAmelCase : Optional[int] = list(filter(lambda __a: re.match(R"^[ a-zA-Z]+$", t[1]), __snake_case))
_lowerCAmelCase : List[str] = list(filter(lambda __a: [t[0]] == tokenizer.encode(t[1], add_special_tokens=__snake_case), __snake_case))
if max_length is not None and len(__snake_case) > max_length:
_lowerCAmelCase : List[Any] = toks[:max_length]
if min_length is not None and len(__snake_case) < min_length and len(__snake_case) > 0:
while len(__snake_case) < min_length:
_lowerCAmelCase : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
_lowerCAmelCase : List[str] = [t[0] for t in toks]
# Ensure consistency
_lowerCAmelCase : Any = tokenizer.decode(__snake_case, clean_up_tokenization_spaces=__snake_case)
if " " not in output_txt and len(__snake_case) > 1:
_lowerCAmelCase : List[str] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=__snake_case)
+ ''' '''
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=__snake_case)
)
if with_prefix_space:
_lowerCAmelCase : List[str] = ''' ''' + output_txt
_lowerCAmelCase : Optional[int] = tokenizer.encode(__snake_case, add_special_tokens=__snake_case)
return output_txt, output_ids
def snake_case__ ( self):
'''simple docstring'''
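        # ByT5 appends </s> automatically, so inputs with and without an
        # explicit eos marker must produce identical input_ids.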
_lowerCAmelCase : List[str] = self.ta_base_tokenizer
_lowerCAmelCase : List[str] = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
_lowerCAmelCase : Optional[int] = tokenizer(["hi", "I went to the gym", ""])
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.ta_base_tokenizer
_lowerCAmelCase : Union[str, Any] = '''Unicode €.'''
_lowerCAmelCase : Optional[Any] = tokenizer(__snake_case)
_lowerCAmelCase : Union[str, Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], __snake_case)
# decoding
_lowerCAmelCase : Union[str, Any] = tokenizer.decode(__snake_case)
self.assertEqual(__snake_case, "Unicode €.</s>")
_lowerCAmelCase : str = tokenizer("e è é ê ë")
_lowerCAmelCase : Dict = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], __snake_case)
# decoding
_lowerCAmelCase : Optional[Any] = tokenizer.decode(__snake_case)
self.assertEqual(__snake_case, "e è é ê ë</s>")
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.ta_base_tokenizer
_lowerCAmelCase : Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
_lowerCAmelCase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowerCAmelCase : Any = tokenizer(__snake_case, padding=__snake_case, return_tensors=__snake_case)
self.assertIsInstance(__snake_case, __snake_case)
if FRAMEWORK != "jax":
_lowerCAmelCase : str = list(batch.input_ids.numpy()[0])
else:
_lowerCAmelCase : Optional[int] = list(batch.input_ids.tolist()[0])
self.assertListEqual(__snake_case, __snake_case)
self.assertEqual((2, 37), batch.input_ids.shape)
self.assertEqual((2, 37), batch.attention_mask.shape)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.ta_base_tokenizer
_lowerCAmelCase : Tuple = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_lowerCAmelCase : Tuple = tokenizer(__snake_case, padding=__snake_case, return_tensors=__snake_case)
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", __snake_case)
self.assertIn("attention_mask", __snake_case)
self.assertNotIn("decoder_input_ids", __snake_case)
self.assertNotIn("decoder_attention_mask", __snake_case)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.ta_base_tokenizer
_lowerCAmelCase : int = [
'''Summary of the text.''',
'''Another summary.''',
]
_lowerCAmelCase : List[str] = tokenizer(
text_target=__snake_case, max_length=32, padding="max_length", truncation=__snake_case, return_tensors=__snake_case)
self.assertEqual(32, targets["input_ids"].shape[1])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.ta_base_tokenizer
_lowerCAmelCase : int = ['''A long paragraph for summarization. </s>''']
_lowerCAmelCase : Tuple = ['''Summary of the text. </s>''']
# fmt: off
_lowerCAmelCase : Dict = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowerCAmelCase : Tuple = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowerCAmelCase : Optional[int] = tokenizer(__snake_case, text_target=__snake_case)
self.assertEqual(__snake_case, batch["input_ids"][0])
self.assertEqual(__snake_case, batch["labels"][0])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
_lowerCAmelCase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Optional[Any] = ''' He is very happy, UNwant\u00E9d,running'''
_lowerCAmelCase : int = tokenizer.encode(__snake_case, add_special_tokens=__snake_case)
tokenizer.save_pretrained(__snake_case)
_lowerCAmelCase : int = tokenizer.__class__.from_pretrained(__snake_case)
_lowerCAmelCase : Optional[Any] = after_tokenizer.encode(__snake_case, add_special_tokens=__snake_case)
self.assertListEqual(__snake_case, __snake_case)
shutil.rmtree(__snake_case)
_lowerCAmelCase : Any = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCAmelCase : int = tempfile.mkdtemp()
_lowerCAmelCase : Union[str, Any] = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(["bim", "bambam"])
_lowerCAmelCase : List[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token")
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
_lowerCAmelCase : int = tokenizer.encode(__snake_case, add_special_tokens=__snake_case)
tokenizer.save_pretrained(__snake_case)
_lowerCAmelCase : int = tokenizer.__class__.from_pretrained(__snake_case)
_lowerCAmelCase : Any = after_tokenizer.encode(__snake_case, add_special_tokens=__snake_case)
self.assertListEqual(__snake_case, __snake_case)
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length, 42)
_lowerCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(__snake_case, model_max_length=43)
self.assertEqual(tokenizer.model_max_length, 43)
shutil.rmtree(__snake_case)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__snake_case)
with open(os.path.join(__snake_case, "special_tokens_map.json"), encoding="utf-8") as json_file:
_lowerCAmelCase : List[Any] = json.load(__snake_case)
with open(os.path.join(__snake_case, "tokenizer_config.json"), encoding="utf-8") as json_file:
_lowerCAmelCase : Any = json.load(__snake_case)
_lowerCAmelCase : Optional[int] = [f"<extra_id_{i}>" for i in range(125)]
_lowerCAmelCase : Optional[Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
_lowerCAmelCase : Union[str, Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(__snake_case, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
json.dump(__snake_case, __snake_case)
with open(os.path.join(__snake_case, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
json.dump(__snake_case, __snake_case)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowerCAmelCase : List[Any] = tokenizer_class.from_pretrained(
__snake_case, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens)
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowerCAmelCase : Any = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=__snake_case)]
_lowerCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
__snake_case, additional_special_tokens=__snake_case, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__snake_case)
_lowerCAmelCase : Tuple = tokenizer_class.from_pretrained(__snake_case)
self.assertTrue(tokenizer.decode([255]) == "")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_tokenizers(fast=__snake_case, do_lower_case=__snake_case)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
_lowerCAmelCase : str = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
_lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_string(__snake_case)
self.assertIsInstance(__snake_case, __snake_case)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
_lowerCAmelCase : Union[str, Any] = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(
__snake_case, skip_special_tokens=__snake_case)
for attr in attributes_list:
setattr(__snake_case, attr + "_id", __snake_case)
self.assertEqual(getattr(__snake_case, __snake_case), __snake_case)
self.assertEqual(getattr(__snake_case, attr + "_id"), __snake_case)
setattr(__snake_case, attr + "_id", __snake_case)
self.assertEqual(getattr(__snake_case, __snake_case), __snake_case)
self.assertEqual(getattr(__snake_case, attr + "_id"), __snake_case)
setattr(__snake_case, "additional_special_tokens_ids", [])
self.assertListEqual(getattr(__snake_case, "additional_special_tokens"), [])
self.assertListEqual(getattr(__snake_case, "additional_special_tokens_ids"), [])
setattr(__snake_case, "additional_special_tokens_ids", [token_id_to_test_setters])
self.assertListEqual(getattr(__snake_case, "additional_special_tokens"), [token_to_test_setters])
self.assertListEqual(getattr(__snake_case, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 705 |
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
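    # Longest Collatz (3n + 1) chain below the limit: chain lengths are
    # memoized in a dict so each number's tail is only walked once.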
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Tuple = counter
if counter > pre_counter:
_lowerCAmelCase : Union[str, Any] = inputa
_lowerCAmelCase : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 658 | 0 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_snake_case = """<<<<<<< This should probably be modified because it mentions: """
_snake_case = """=======
>>>>>>>
"""
_snake_case = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
_snake_case = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def A ( _lowerCamelCase ):
'''simple docstring'''
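    # Factory wired into the CLI subparser below via set_defaults(func=...).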
return ConvertCommand(args.tfds_path , args.datasets_directory )
class UpperCAmelCase_ ( a):
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = parser.add_parser(
"convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
train_parser.add_argument(
"--tfds_path", type=__a, required=__a, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
train_parser.add_argument(
"--datasets_directory", type=__a, required=__a, help="Path to the HuggingFace Datasets folder.")
train_parser.set_defaults(func=__a)
def __init__( self, __a, __a, *__a):
'''simple docstring'''
_lowerCAmelCase : List[str] = get_logger("datasets-cli/converting")
_lowerCAmelCase : Union[str, Any] = tfds_path
_lowerCAmelCase : Any = datasets_directory
def snake_case__ ( self):
'''simple docstring'''
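        # Walk the TFDS source files, rewrite imports and idioms with the
        # TO_CONVERT regexes, flag files that mention TO_HIGHLIGHT names for
        # manual review, and copy shared utilities next to each generated
        # dataset builder.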
if os.path.isdir(self._tfds_path):
_lowerCAmelCase : str = os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
_lowerCAmelCase : str = os.path.dirname(self._tfds_path)
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
_lowerCAmelCase : str = os.path.abspath(self._datasets_directory)
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Any = {}
if os.path.isdir(self._tfds_path):
_lowerCAmelCase : Tuple = os.listdir(__a)
else:
_lowerCAmelCase : Optional[Any] = [os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}")
_lowerCAmelCase : Tuple = os.path.join(__a, __a)
_lowerCAmelCase : Dict = os.path.join(__a, __a)
if not os.path.isfile(__a) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file")
continue
with open(__a, encoding="utf-8") as f:
_lowerCAmelCase : Dict = f.readlines()
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : str = False
_lowerCAmelCase : Optional[int] = []
for line in lines:
_lowerCAmelCase : Tuple = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_lowerCAmelCase : Union[str, Any] = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
_lowerCAmelCase : Optional[Any] = ""
continue
elif "from absl import logging" in out_line:
_lowerCAmelCase : Any = "from datasets import logging\n"
elif "getLogger" in out_line:
_lowerCAmelCase : Tuple = out_line.replace("getLogger", "get_logger")
elif any(expression in out_line for expression in TO_HIGHLIGHT):
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : Optional[Any] = list(filter(lambda __a: e in out_line, __a))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__a) + "\n")
out_lines.append(__a)
out_lines.append(__a)
continue
else:
for pattern, replacement in TO_CONVERT:
_lowerCAmelCase : str = re.sub(__a, __a, __a)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_lowerCAmelCase : Any = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)", __a)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
_lowerCAmelCase : int = "from . import " + match.group(1)
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}")
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_lowerCAmelCase : Tuple = True
out_lines.append(__a)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_lowerCAmelCase : Union[str, Any] = f_name.replace(".py", "")
_lowerCAmelCase : Any = os.path.join(__a, __a)
_lowerCAmelCase : Optional[int] = os.path.join(__a, __a)
os.makedirs(__a, exist_ok=__a)
self._logger.info(f"Adding directory {output_dir}")
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(__a)
if needs_manual_update:
with_manual_update.append(__a)
with open(__a, "w", encoding="utf-8") as f:
f.writelines(__a)
self._logger.info(f"Converted in {output_file}")
for utils_file in utils_files:
try:
_lowerCAmelCase : Tuple = os.path.basename(__a)
_lowerCAmelCase : List[str] = imports_to_builder_map[f_name.replace(".py", "")]
self._logger.info(f"Moving {dest_folder} to {utils_file}")
shutil.copy(__a, __a)
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 706 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
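    # Normalize original Jukebox checkpoint names to the HF JukeboxModel layout;
    # the len(key.split(".")) > 10 guards restrict the .model.N rewrites to
    # deeply nested conv-block keys.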
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 0 |
from __future__ import annotations
def simple_interest(principal, daily_interest_rate, days_between_payments):
    '''simple docstring'''
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0" )
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * daily_interest_rate * days_between_payments


def compound_interest(principal, nominal_annual_interest_rate_percentage, number_of_compounding_periods):
    '''simple docstring'''
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0" )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(principal, nominal_annual_percentage_rate, number_of_years):
    '''simple docstring'''
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0" )
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
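    # Added usage sketch (not in the original module); the figures are worked
    # examples: 10_000 * 0.0005 * 60 == 300.0 and 10_000 * (1.05**3 - 1) ~= 1576.25.
    print(simple_interest(10_000, 0.0005, 60))  # 300.0
    print(compound_interest(10_000, 0.05, 3))  # ~1576.25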
| 707 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    '''simple docstring'''
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
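    # Added sketch (not in the original script): also persist the edge map,
    # which is useful on headless machines where imshow has no display; the
    # output filename is an assumption.
    from cv2 import imwrite

    imwrite("gabor_edges.png", out)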
| 658 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency, samplerate, q_factor=1 / sqrt(2)):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency, samplerate, q_factor=1 / sqrt(2)):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency, samplerate, q_factor=1 / sqrt(2)):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency, samplerate, q_factor=1 / sqrt(2)):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency, samplerate, gain_db, q_factor=1 / sqrt(2)):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency, samplerate, gain_db, q_factor=1 / sqrt(2)):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency, samplerate, gain_db, q_factor=1 / sqrt(2)):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
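

if __name__ == "__main__":
    # Added usage sketch (not in the original module): build a 1 kHz low-pass
    # for 48 kHz audio and run it over a toy signal. IIRFilter.process() is
    # assumed to exist, as in the accompanying audio_filters.iir_filter module.
    lowpass = make_lowpass(1_000, 48_000)
    print([round(lowpass.process(sample), 4) for sample in [0.0, 1.0, 0.0, -1.0] * 2])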
| 708 |
def binary_insertion_sort(collection):
    '''simple docstring'''
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        # binary search for the insertion point of collection[i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift everything after the insertion point one slot to the right
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
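    # Added worked example (sketch): binary search trims each insertion's
    # comparisons to O(log i), but the shifting loop keeps the sort O(n^2).
    print(binary_insertion_sort([5, 2, 4, 6, 1, 3]))  # [1, 2, 3, 4, 5, 6]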
| 658 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCAmelCase_ ( PipelineTesterMixin , unittest.TestCase):
lowerCamelCase__ = ShapEPipeline
lowerCamelCase__ = ['prompt']
lowerCamelCase__ = ['prompt']
lowerCamelCase__ = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowerCamelCase__ = False
@property
def snake_case__ ( self):
'''simple docstring'''
return 32
@property
def snake_case__ ( self):
'''simple docstring'''
return 32
@property
def snake_case__ ( self):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case__ ( self):
'''simple docstring'''
return 8
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModelWithProjection(_lowerCAmelCase)
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Optional[int] = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
_lowerCAmelCase : List[str] = PriorTransformer(**_lowerCAmelCase)
return model
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Tuple = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**_lowerCAmelCase)
return model
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Union[str, Any] = self.dummy_renderer
_lowerCAmelCase : str = HeunDiscreteScheduler(
beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=_lowerCAmelCase, clip_sample=_lowerCAmelCase, clip_sample_range=1.0, )
_lowerCAmelCase : Dict = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def snake_case__ ( self, __a, __a=0):
'''simple docstring'''
if str(_lowerCAmelCase).startswith("mps"):
_lowerCAmelCase : str = torch.manual_seed(_lowerCAmelCase)
else:
_lowerCAmelCase : List[Any] = torch.Generator(device=_lowerCAmelCase).manual_seed(_lowerCAmelCase)
_lowerCAmelCase : str = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = "cpu"
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**_lowerCAmelCase)
_lowerCAmelCase : List[Any] = pipe.to(_lowerCAmelCase)
pipe.set_progress_bar_config(disable=_lowerCAmelCase)
_lowerCAmelCase : Dict = pipe(**self.get_dummy_inputs(_lowerCAmelCase))
_lowerCAmelCase : Optional[int] = output.images[0]
_lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Optional[Any] = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch_device == "cpu"
_lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=_lowerCAmelCase, relax_max_difference=_lowerCAmelCase, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**_lowerCAmelCase)
_lowerCAmelCase : Union[str, Any] = pipe.to(_lowerCAmelCase)
pipe.set_progress_bar_config(disable=_lowerCAmelCase)
_lowerCAmelCase : Tuple = 1
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Dict = self.get_dummy_inputs(_lowerCAmelCase)
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : Dict = batch_size * [inputs[key]]
_lowerCAmelCase : Dict = pipe(**_lowerCAmelCase, num_images_per_prompt=_lowerCAmelCase)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy")
_lowerCAmelCase : int = ShapEPipeline.from_pretrained("openai/shap-e")
_lowerCAmelCase : int = pipe.to(_lowerCAmelCase)
pipe.set_progress_bar_config(disable=_lowerCAmelCase)
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_lowerCAmelCase).manual_seed(0)
_lowerCAmelCase : Optional[int] = pipe(
"a shark", generator=_lowerCAmelCase, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_lowerCAmelCase, _lowerCAmelCase)
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1E-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
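

if __name__ == "__main__":
    # Added usage sketch (not part of the original module): the default config
    # derives one stage name per depth entry, plus the stem.
    config = FocalNetConfig()
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']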
| 658 | 0 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_snake_case = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.')
@require_torch
@require_tf
@slow
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = True, ):
'''simple docstring'''
_lowerCAmelCase : str = [file for file in os.listdir(_a) if os.path.isfile(os.path.join(_a, _a))]
if identifier is not None:
_lowerCAmelCase : Tuple = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_a, _a):
for n_ in n_identifier:
_lowerCAmelCase : Optional[Any] = [file for file in files if n_ not in file]
else:
_lowerCAmelCase : List[Any] = [file for file in files if n_identifier not in file]
_lowerCAmelCase : Optional[Any] = ignore_files or []
ignore_files.append("__init__.py")
_lowerCAmelCase : Union[str, Any] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing", _a)
if only_modules:
_lowerCAmelCase : str = file.split(".")[0]
try:
_lowerCAmelCase : Union[str, Any] = getattr(_a, _a)
_lowerCAmelCase : Dict = doctest.DocTestSuite(_a)
_lowerCAmelCase : Tuple = unittest.TextTestRunner().run(_a)
self.assertIs(len(result.failures), 0)
except AttributeError:
logger.info(f"{module_identifier} is not a module.")
else:
_lowerCAmelCase : Any = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed, 0)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = Path("src/transformers")
_lowerCAmelCase : Union[str, Any] = """modeling"""
_lowerCAmelCase : Optional[Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(_a, identifier=_a, ignore_files=_a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = Path("src/transformers")
_lowerCAmelCase : List[str] = """tokenization"""
self.analyze_directory(_a, identifier=_a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = Path("src/transformers")
_lowerCAmelCase : Dict = """configuration"""
self.analyze_directory(_a, identifier=_a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Path("src/transformers")
_lowerCAmelCase : Union[str, Any] = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(_a, n_identifier=_a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Path("docs/source")
_lowerCAmelCase : int = ["""favicon.ico"""]
self.analyze_directory(_a, ignore_files=_a, only_modules=_a)
| 710 |
def combination_sum_iv(n, array, target):
    '''simple docstring'''

    def count_of_possible_combinations(target):
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n, array, target):
    '''simple docstring'''

    def count_of_possible_combinations_with_dp_array(target, dp_array):
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n, array, target):
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
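    # Added worked note (sketch): order matters here, so target 5 over
    # [1, 2, 5] has 9 compositions: 1+1+1+1+1, four orderings of 1+1+1+2,
    # three orderings of 1+2+2, and 5 itself. Both optimized variants agree:
    print(combination_sum_iv_dp_array(n, array, target))  # 9
    print(combination_sum_iv_bottom_up(n, array, target))  # 9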
| 658 | 0 |
def generate_large_matrix():
    '''simple docstring'''
    return [list(range(1_000 - i, -1_000 - i, -1)) for i in range(1_000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid):
    '''simple docstring'''
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array):
    '''simple docstring'''
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid):
    '''simple docstring'''
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid):
    '''simple docstring'''
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid):
    '''simple docstring'''
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark():
    '''simple docstring'''
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F"{func}(grid=grid)", setup=setup, number=500)
        print(F"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
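    # Added sanity check (sketch, not in the original script): all three
    # implementations must agree on the hand-written test grids before the
    # benchmark numbers mean anything. Note the last grid is 1000x1000, so
    # this check is not instantaneous.
    for test_grid in test_grids:
        validate_grid(test_grid)
        assert (
            count_negatives_binary_search(test_grid)
            == count_negatives_brute_force(test_grid)
            == count_negatives_brute_force_with_break(test_grid)
        )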
| 711 |
import string
def decrypt(message):
    '''simple docstring'''
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}")


def main():
    '''simple docstring'''
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
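    # Added round-trip sketch (not in the original script): "HELLO" shifted
    # forward by 3 is "KHOOR", so decrypt("KHOOR") lists "HELLO" at Key #3.
    decrypt("KHOOR")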
| 658 | 0 |
from __future__ import annotations
def extended_euclid(a, b):
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1, r1, n2, r2):
    '''simple docstring'''
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a, n):
    '''simple docstring'''
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1, r1, n2, r2):
    '''simple docstring'''
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 712 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url="https://www.worldometers.info/coronavirus"):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
_snake_case = {
"camembert-base": 512,
}
_snake_case = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CamembertTokenizer
def __init__( self, __a=None, __a=None, __a="<s>", __a="</s>", __a="</s>", __a="<s>", __a="<unk>", __a="<pad>", __a="<mask>", __a=["<s>NOTUSED", "</s>NOTUSED"], **__a, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else mask_token
super().__init__(
__a, tokenizer_file=__a, bos_token=__a, eos_token=__a, sep_token=__a, cls_token=__a, unk_token=__a, pad_token=__a, mask_token=__a, additional_special_tokens=__a, **__a, )
_lowerCAmelCase : int = vocab_file
_lowerCAmelCase : Dict = False if not self.vocab_file else True
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Optional[int] = [self.cls_token_id]
_lowerCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__a):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
_lowerCAmelCase : Dict = os.path.join(
__a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__a):
copyfile(self.vocab_file, __a)
return (out_vocab_file,)
| 713 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree, coefficients):
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2):
        '''simple docstring'''
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2):
        '''simple docstring'''
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self):
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2):
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution):
        '''simple docstring'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self):
        '''simple docstring'''
        return self.__str__()

    def derivative(self):
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant=0):
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2):
        '''simple docstring'''
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2):
        '''simple docstring'''
        return not self.__eq__(polynomial_2)
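

if __name__ == "__main__":
    # Added usage sketch (not in the original module); coefficients are stored
    # low-to-high degree, so [1, 0, 3] encodes 3x^2 + 1.
    p = Polynomial(2, [1, 0, 3])
    print(p)  # 3x^2 + 1
    print(p.evaluate(2))  # 13
    print(p.derivative())  # 6x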
| 658 | 0 |
import math
def malus_law(initial_intensity, angle):
    '''simple docstring'''
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative" )
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 714 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = 'xlnet'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=3_2000, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=True, __a="bi", __a=0.02, __a=1E-12, __a=0.1, __a=512, __a=None, __a=True, __a=False, __a=False, __a=-1, __a=False, __a="last", __a=True, __a="tanh", __a=0.1, __a=5, __a=5, __a=5, __a=1, __a=2, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Tuple = n_layer
_lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
_lowerCAmelCase : Optional[int] = d_model // n_head
_lowerCAmelCase : List[str] = ff_activation
_lowerCAmelCase : Tuple = d_inner
_lowerCAmelCase : List[Any] = untie_r
_lowerCAmelCase : List[str] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = dropout
_lowerCAmelCase : Optional[int] = mem_len
_lowerCAmelCase : Union[str, Any] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : List[str] = clamp_len
_lowerCAmelCase : Any = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : Optional[Any] = summary_activation
_lowerCAmelCase : Tuple = summary_last_dropout
_lowerCAmelCase : Union[str, Any] = start_n_top
_lowerCAmelCase : Optional[int] = end_n_top
_lowerCAmelCase : Tuple = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def snake_case__ ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 0 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        '''simple docstring'''
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        '''simple docstring'''
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    '''simple docstring'''
    ds_kwargs = {"streaming": True}
    train_data = load_dataset(args.dataset_name , split="train" , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , train_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader


def evaluate(args):
    '''simple docstring'''
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float("inf" )
    return loss.item(), perplexity.item()
# Setup Accelerator
_snake_case = Accelerator()
# Parse configuration
_snake_case = HfArgumentParser(EvaluationArguments)
_snake_case = parser.parse_args()
set_seed(args.seed)
# Logging
_snake_case = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
_snake_case = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_snake_case = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_snake_case = create_dataloader(args)
# Prepare everything with our `accelerator`.
_snake_case, _snake_case = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
_snake_case, _snake_case = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 715 |
def price_plus_tax(price, tax_rate):
    '''simple docstring'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
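    # Added worked check (sketch): 100 * (1 + 0.25) == 125.0 and
    # 125.50 * (1 + 0.05) == 131.775, matching the f-strings above.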
| 658 | 0 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class FillMaskPipeline(Pipeline):
def snake_case__ ( self, __a):
'''simple docstring'''
if self.framework == "tf":
_lowerCAmelCase : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
elif self.framework == "pt":
_lowerCAmelCase : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=__SCREAMING_SNAKE_CASE)
else:
raise ValueError("Unsupported framework")
return masked_index
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : str = self.get_masked_index(__SCREAMING_SNAKE_CASE)
_lowerCAmelCase : List[str] = np.prod(masked_index.shape)
if numel < 1:
raise PipelineException(
"fill-mask", self.model.base_model_prefix, f"No mask_token ({self.tokenizer.mask_token}) found on the input", )
def snake_case__ ( self, __a):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE)
def snake_case__ ( self, __a, __a=None, **__a):
'''simple docstring'''
if return_tensors is None:
_lowerCAmelCase : Optional[int] = self.framework
_lowerCAmelCase : Optional[int] = self.tokenizer(__SCREAMING_SNAKE_CASE, return_tensors=__SCREAMING_SNAKE_CASE)
self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE)
return model_inputs
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : str = self.model(**__SCREAMING_SNAKE_CASE)
_lowerCAmelCase : List[Any] = model_inputs["input_ids"]
return model_outputs
def snake_case__ ( self, __a, __a=5, __a=None):
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
_lowerCAmelCase : Union[str, Any] = target_ids.shape[0]
_lowerCAmelCase : Union[str, Any] = model_outputs["input_ids"][0]
_lowerCAmelCase : Optional[Any] = model_outputs["logits"]
if self.framework == "tf":
_lowerCAmelCase : Optional[Any] = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
_lowerCAmelCase : Tuple = outputs.numpy()
_lowerCAmelCase : Dict = outputs[0, masked_index, :]
_lowerCAmelCase : List[Any] = stable_softmax(__SCREAMING_SNAKE_CASE, axis=-1)
if target_ids is not None:
_lowerCAmelCase : int = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE, 0), target_ids.reshape(-1, 1))
_lowerCAmelCase : Any = tf.expand_dims(__SCREAMING_SNAKE_CASE, 0)
_lowerCAmelCase : str = tf.math.top_k(__SCREAMING_SNAKE_CASE, k=__SCREAMING_SNAKE_CASE)
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = topk.values.numpy(), topk.indices.numpy()
else:
_lowerCAmelCase : List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=__SCREAMING_SNAKE_CASE).squeeze(-1)
# Fill mask pipeline supports only one ${mask_token} per sample
_lowerCAmelCase : str = outputs[0, masked_index, :]
_lowerCAmelCase : Optional[Any] = logits.softmax(dim=-1)
if target_ids is not None:
_lowerCAmelCase : Tuple = probs[..., target_ids]
_lowerCAmelCase , _lowerCAmelCase : Dict = probs.topk(__SCREAMING_SNAKE_CASE)
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
_lowerCAmelCase : Dict = []
for v, p in zip(_values, _predictions):
# Copy is important since we're going to modify this array in place
_lowerCAmelCase : Union[str, Any] = input_ids.numpy().copy()
if target_ids is not None:
_lowerCAmelCase : Optional[int] = target_ids[p].tolist()
_lowerCAmelCase : int = p
# Filter padding out:
_lowerCAmelCase : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_lowerCAmelCase : int = self.tokenizer.decode(__SCREAMING_SNAKE_CASE, skip_special_tokens=__SCREAMING_SNAKE_CASE)
_lowerCAmelCase : List[Any] = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
row.append(__SCREAMING_SNAKE_CASE)
result.append(__SCREAMING_SNAKE_CASE)
if single_mask:
return result[0]
return result
def snake_case__ ( self, __a, __a=None):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
_lowerCAmelCase : str = [targets]
try:
_lowerCAmelCase : Tuple = self.tokenizer.get_vocab()
except Exception:
_lowerCAmelCase : Tuple = {}
_lowerCAmelCase : Union[str, Any] = []
for target in targets:
_lowerCAmelCase : str = vocab.get(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
if id_ is None:
_lowerCAmelCase : List[str] = self.tokenizer(
__SCREAMING_SNAKE_CASE, add_special_tokens=__SCREAMING_SNAKE_CASE, return_attention_mask=__SCREAMING_SNAKE_CASE, return_token_type_ids=__SCREAMING_SNAKE_CASE, max_length=1, truncation=__SCREAMING_SNAKE_CASE, )["input_ids"]
if len(__SCREAMING_SNAKE_CASE) == 0:
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
"We cannot replace it with anything meaningful, ignoring it")
continue
_lowerCAmelCase : List[str] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.")
target_ids.append(id_)
_lowerCAmelCase : Tuple = list(set(__SCREAMING_SNAKE_CASE))
if len(__SCREAMING_SNAKE_CASE) == 0:
raise ValueError("At least one target must be provided when passed.")
_lowerCAmelCase : int = np.array(__SCREAMING_SNAKE_CASE)
return target_ids
def snake_case__ ( self, __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Tuple = {}
if targets is not None:
_lowerCAmelCase : int = self.get_target_ids(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
_lowerCAmelCase : str = target_ids
if top_k is not None:
_lowerCAmelCase : List[Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`.")
return {}, {}, postprocess_params
def __call__( self, __a, *__a, **__a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = super().__call__(__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE)
if isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) and len(__SCREAMING_SNAKE_CASE) == 1:
return outputs[0]
return outputs
| 716 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
    def __init__( self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
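# Hedged usage sketch for the config above; assumes `UperNetConfig` and
# `ConvNextConfig` are importable from `transformers`, as in recent releases.
from transformers import ConvNextConfig, UperNetConfig
backbone_config = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
upernet_config = UperNetConfig(backbone_config=backbone_config, auxiliary_loss_weight=0.4)
print(upernet_config.to_dict()["backbone_config"]["model_type"])  # -> "convnext"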
| 658 | 0 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"Salesforce/codegen-350M-mono": 2048,
}
class UpperCAmelCase_ ( PreTrainedTokenizerFast):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['input_ids', 'attention_mask']
lowerCamelCase__ = CodeGenTokenizer
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly.")
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs):
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs):
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(self, token_ids, skip_special_tokens = False, clean_up_tokenization_spaces = None, truncate_before_pattern = None, **kwargs, ):
        '''simple docstring'''
        decoded_text = super().decode(
            token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        '''simple docstring'''
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1
        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
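# Hedged usage sketch for `decode(..., truncate_before_pattern=...)` above;
# assumes the public Salesforce/codegen-350M-mono checkpoint is reachable.
# The second line starting with "print" is cut off by the `prints` logic in
# `truncate`, even though the extra "^#" pattern never matches here.
from transformers import AutoTokenizer
codegen_tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = codegen_tok("def add(a, b):\n    return a + b\n\nprint(add(1, 2))\nprint(add(3, 4))")["input_ids"]
print(codegen_tok.decode(ids, truncate_before_pattern=["^#"]))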
| 717 |
import base64
def A ( _lowerCamelCase ):
    '''simple docstring'''
    return base64.a85encode(_lowerCamelCase.encode("utf-8" ) )
def A ( _lowerCamelCase ):
    '''simple docstring'''
    return base64.a85decode(_lowerCamelCase ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
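# Hedged round-trip check for the Ascii85 helpers above; only the standard
# library is assumed. Both helpers share the name `A` (the decoder shadows the
# encoder), so base64 is called directly here.
_encoded = base64.a85encode("some text".encode("utf-8"))
print(base64.a85decode(_encoded).decode("utf-8"))  # -> some text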
| 658 | 0 |
import os
def solution( _lowerCamelCase = "input.txt" ):
    '''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__) , _lowerCamelCase ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split("," )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
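# Hedged worked example of the right/up/down dynamic programme above, run on
# the 5x5 matrix from the Project Euler 82 statement (expected answer: 994).
# This inline variant skips the file I/O of `solution`.
example_matrix = [
    [131, 673, 234, 103, 18],
    [201, 96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524, 37, 331],
]
example_rows, example_cols = len(example_matrix), len(example_matrix[0])
dp = [[example_matrix[i][0]] + [0] * (example_cols - 1) for i in range(example_rows)]
for j in range(1, example_cols):
    for i in range(example_rows):  # enter each cell from the left
        dp[i][j] = dp[i][j - 1] + example_matrix[i][j]
    for i in range(1, example_rows):  # relax downward moves
        dp[i][j] = min(dp[i][j], dp[i - 1][j] + example_matrix[i][j])
    for i in range(example_rows - 2, -1, -1):  # relax upward moves
        dp[i][j] = min(dp[i][j], dp[i + 1][j] + example_matrix[i][j])
print(min(row[-1] for row in dp))  # -> 994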
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( PretrainedConfig):
lowerCamelCase__ = 'data2vec-vision'
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class UpperCAmelCase_ ( OnnxConfig):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
| 658 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( PoolFormerImageProcessor):
    def __init__( self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 719 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    '''simple docstring'''
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {os.path.abspath(pytorch_config_dump_path )}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
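# Hedged example invocation; the script name and checkpoint paths below are
# placeholders, not files shipped with this snippet:
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#     --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet-pytorch \
#     --finetuning_task sts-b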
| 658 | 0 |
def move_tower( height , from_pole , to_pole , with_pole ):
    '''simple docstring'''
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk( from_pole , to_pole ):
    '''simple docstring'''
    print("moving disk from" , from_pole , "to" , to_pole )
def main( ):
    '''simple docstring'''
    height = int(input("Height of hanoi: " ).strip() )
    move_tower(height , "A" , "B" , "C" )
if __name__ == "__main__":
main()
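# Hedged trace check for the recursion above: with the reconstructed argument
# order, move_tower(2, "A", "B", "C") prints the optimal 2**2 - 1 = 3 moves
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B
# i.e. the two-disk tower travels from pole A to pole B via pole C.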
| 720 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
    def _info( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
    def _compute( self, predictions, references, max_order=4, smooth=False):
        '''simple docstring'''
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 658 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_snake_case = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample( wav , max_length , sample_rate = 16_000 ):
    '''simple docstring'''
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None , metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_file: Optional[str] = field(
        default=None , metadata={"help": "A file containing the training audio paths and labels."})
    eval_file: Optional[str] = field(
        default=None , metadata={"help": "A file containing the validation audio paths and labels."})
    train_split_name: str = field(
        default="train" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    eval_split_name: str = field(
        default="validation" , metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    audio_column_name: str = field(
        default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
    label_column_name: str = field(
        default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"})
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    max_length_seconds: float = field(
        default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"})
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={"help": "Name or path of preprocessor config."})
    freeze_feature_encoder: bool = field(
        default=True , metadata={"help": "Whether to freeze the feature encoder layers of the model."})
    attention_mask: bool = field(
        default=True , metadata={"help": "Whether to generate an attention mask in the feature extractor."})
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={"help": "Whether to freeze the feature extractor layers of the model."})
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
    def __post_init__(self):
        '''simple docstring'''
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.", FutureWarning, )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`.")
def main( ):
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification" , model_args , data_args )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
    logger.info(F"Training/evaluation parameters {training_args}" )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            F"{', '.join(raw_datasets['train'].column_names )}." )
    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            F"{', '.join(raw_datasets['train'].column_names )}." )
    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    labelaid, idalabel = {}, {}
    for i, label in enumerate(labels ):
        labelaid[label] = str(i )
        idalabel[str(i )] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , labelaid=labelaid , idalabel=idalabel , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 721 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config( config_path , display=False ):
    '''simple docstring'''
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device , conf_path=None , ckpt_path=None ):
    '''simple docstring'''
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x , model ):
    '''simple docstring'''
    z, _, _ = model.encode(x )
    print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string , reload=False ):
    '''simple docstring'''
    module, cls = string.rsplit("." , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config( config ):
    '''simple docstring'''
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate." )
    return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def load_model_from_config( config , sd , gpu=True , eval_mode=True ):
    '''simple docstring'''
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config , ckpt , gpu , eval_mode ):
    '''simple docstring'''
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(F"loaded model from global step {global_step}." )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=gpu , eval_mode=eval_mode )["model"]
    return model, global_step
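# Hedged sketch of the `target`/`params` convention consumed by
# `instantiate_from_config` above; `torch.nn.Linear` is just a stand-in import
# path, not a class the VQGAN configs actually reference.
_example_config = {"target": "torch.nn.Linear", "params": {"in_features": 4, "out_features": 2}}
_layer = instantiate_from_config(_example_config)
print(type(_layer).__name__)  # -> Linear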
| 658 | 0 |
from collections import defaultdict
def check_anagrams( first_str , second_str ):
    '''simple docstring'''
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" " , "" )
    second_str = second_str.replace(" " , "" )
    # Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
        return False
    # Default values for count should be 0
    count = defaultdict(int )
    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
_snake_case = input("Enter the first string ").strip()
_snake_case = input("Enter the second string ").strip()
_snake_case = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
| 700 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( PretrainedConfig):
lowerCamelCase__ = 'roc_bert'
    def __init__( self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=2_4858, concat_input=True, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 658 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ ( ProcessorMixin):
lowerCamelCase__ = """Speech2TextFeatureExtractor"""
lowerCamelCase__ = """Speech2TextTokenizer"""
    def __init__( self, feature_extractor, tokenizer):
        '''simple docstring'''
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self, *args, **kwargs):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode( self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor( self):
        '''simple docstring'''
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
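# Hedged usage sketch for the processor above; assumes the public
# facebook/s2t-small-librispeech-asr checkpoint and a 16 kHz waveform.
import numpy as np
from transformers import Speech2TextProcessor
processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
batch = processor(audio=waveform, sampling_rate=16000, text="HELLO", return_tensors="np")
print(batch["input_features"].shape, batch["labels"])  # features plus tokenized labels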
| 701 |
from __future__ import annotations
def A ( nums ):
    '''simple docstring'''
    if not nums:
        raise ValueError("List is empty" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass( KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs(), {})
self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
@require_cuda
def snake_case__ ( self):
'''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale, 1_024.0)
self.assertEqual(scaler._growth_factor, 2.0)
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor, 0.5)
self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
def snake_case__ ( self):
'''simple docstring'''
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 702 |
def hexagonal_numbers( length ):
    '''simple docstring'''
    if length <= 0 or not isinstance(length , int ):
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
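# Hedged sanity check: the n-th hexagonal number is n * (2 * n - 1), so the
# sequence starts 0, 1, 6, 15, 28 and must match the closed form used above.
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]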
| 658 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )
    def create_and_check_model(self, config, input_ids, input_mask):
        '''simple docstring'''
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = (TFBlipTextModel,) if is_tf_available() else ()
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model(self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="Blip does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
def snake_case__ ( self):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained(self):
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFBlipTextModel.from_pretrained(__a)
self.assertIsNotNone(__a)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        '''simple docstring'''
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 703 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape( input_array ):
    '''simple docstring'''
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes( features , labels , classes ):
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes( features , labels , classes ):
    '''simple docstring'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis( features , dimensions ):
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , centered_data )
        logging.info("Principal Component Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def linear_discriminant_analysis( features , labels , classes , dimensions ):
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
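# Hedged worked example for principal_component_analysis above: three
# perfectly correlated feature rows span a single principal direction, so the
# second projected row is (numerically) constant at zero.
_features = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [3.0, 6.0, 9.0]])
_projected = principal_component_analysis(_features, 2)
print(_projected.shape)  # -> (2, 3)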
| 658 | 0 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
    '''simple docstring'''
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , None ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type="TPU" )
        print(F"Launching a training on {num_processes} TPU cores." )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
        function(*args )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__lowercase , master_addr="127.0.01" , master_port=__lowercase , mixed_precision=__lowercase ):
_lowerCAmelCase : Tuple = PrepareForLaunch(__lowercase , distributed_type="MULTI_GPU" )
print(F"Launching training on {num_processes} GPUs." )
try:
start_processes(__lowercase , args=__lowercase , nprocs=__lowercase , start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
_lowerCAmelCase : Optional[Any] = '1'
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__lowercase )
def A ( _lowerCamelCase , _lowerCamelCase=() , _lowerCamelCase=2 ):
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__lowercase , master_addr="127.0.01" , master_port="29500" , accelerate_mixed_precision="no" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="yes" , ):
_lowerCAmelCase : Any = PrepareForLaunch(__lowercase , debug=__lowercase )
start_processes(__lowercase , args=__lowercase , nprocs=__lowercase , start_method="fork" )
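
# A hypothetical usage sketch (the training function and its hyperparameters
# are illustrative placeholders, not part of this module):
#
#     def training_loop(learning_rate, batch_size):
#         ...  # build the model, dataloaders and Accelerator, then train
#
#     notebook_launcher(training_loop, args=(3e-4, 64), num_processes=2, mixed_precision="fp16")
#     debug_launcher(training_loop, args=(3e-4, 64))  # CPU-only smoke test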
| 704 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count reported by Google Scholar for the given query."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
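
    # Note (assumption): Google Scholar often throttles or blocks the default
    # `requests` user agent, so a real client may need to send headers such as:
    #
    #     response = requests.get(
    #         "https://scholar.google.com/scholar_lookup",
    #         params=params,
    #         headers={"User-Agent": "Mozilla/5.0"},  # illustrative value only
    #     )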
| 658 | 0 |
"""Reader and writer classes for JSON and JSON Lines datasets."""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args
        # Slice one batch of rows out of the underlying Arrow table.
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        """Write the Arrow table as JSON to a binary file handle; the caller
        is responsible for opening and closing the handle."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
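
# A hypothetical round trip with these classes (file name and split are
# illustrative; extra keyword arguments flow into pandas' `to_json`):
#
#     ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#     JsonDatasetWriter(ds, "out.jsonl", batch_size=2).write()
#     reloaded = JsonDatasetReader("out.jsonl", split=NamedSplit("train")).read()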
| 705 |
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below `limit` whose Collatz chain is the
    longest, memoizing chain lengths in `counters` as they are discovered."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for start in range(2, limit):
        counter = 0
        number = start
        while True:
            # Reuse a previously computed chain length as soon as we hit it.
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
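
# Hand-checked example (illustrative, not from the original source): among
# starting values below 14, 9 yields the longest chain (20 terms), so
#
#     assert solution(14) == 9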
| 658 | 0 |