from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a dataset using the datasets package and save it to the format expected by finetune.py.

    Format of save_dir: train.source, train.target, val.source, val.target, test.source, test.target.

    Args:
        src_lang: <str> source language
        tgt_lang: <str> target language
        dataset: <str> wmt16, wmt17, etc.
        save_dir: <str>, where to save the datasets, defaults to f"{dataset}-{src_lang}-{tgt_lang}"

    Usage:
        >>> download_wmt_dataset("ro", "en", dataset="wmt16")  # saves to wmt16-ro-en
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)

import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    Pipeline,
    ZeroShotClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )

import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")

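# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): one hypothetical way to wire
# the callbacks above into a pytorch_lightning Trainer. `MyRagModule` and the
# directory/metric names are assumptions for illustration only.
#
#   model = MyRagModule(hparams)  # any LightningModule exposing hparams.output_dir
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback("checkpoints", "rouge2"),
#           get_early_stopping_callback("rouge2", patience=3),
#       ],
#   )
#   trainer.fit(model)
# ---------------------------------------------------------------------------
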
"""Convert RoBERTa checkpoint."""


import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak roberta's weights to our BERT structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )

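# ---------------------------------------------------------------------------
# Example invocation of the conversion script above (the script file name and
# all paths are hypothetical):
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-converted \
#       --classification_head
# ---------------------------------------------------------------------------
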
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
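
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): these schedules are meant to
# be passed as explicit inference timesteps to a diffusion pipeline whose
# __call__ accepts a `timesteps` argument (the DeepFloyd IF pipelines do), e.g.:
#
#   image = pipe(prompt_embeds=prompt_embeds, timesteps=fast27_timesteps).images
# ---------------------------------------------------------------------------
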
import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """
    Launches a training function, using several processes if it's possible in the current environment
    (TPU with multiple cores for instance).
    """
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e

        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """
    Launches a training function using several processes on CPU for debugging purposes.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")

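# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module), e.g. from a notebook cell;
# `training_loop` is a hypothetical user function:
#
#   def training_loop(mixed_precision="fp16"):
#       accelerator = Accelerator(mixed_precision=mixed_precision)  # built *inside*, as required above
#       ...
#
#   notebook_launcher(training_loop, args=("fp16",), num_processes=2)
# ---------------------------------------------------------------------------
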
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

_CITATION = r"""\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
             Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}

from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you leave it or pass -1 to the
    # function the count will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you leave it or pass -1 to the
    # function the count will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin

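# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): build a small directed graph
# with a cycle and exercise the traversals.
#
#   g = DirectedGraph()
#   g.add_pair(0, 1)
#   g.add_pair(1, 2)
#   g.add_pair(2, 0)      # closes the cycle 0 -> 1 -> 2 -> 0
#   print(g.dfs())        # [0, 1, 2]
#   print(g.bfs())        # [0, 1, 2]
#   print(g.has_cycle())  # True
# ---------------------------------------------------------------------------
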
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a [`MegatronBertModel`]."""

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache

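# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module):
#
#   config = MegatronBertConfig(num_hidden_layers=12)  # override any default
#   print(config.hidden_size)  # 1024
# ---------------------------------------------------------------------------
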
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCamelCase : List[str] = re.compile(R'''\s+''')
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__magic_name__ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Optional[Any] = [len(__magic_name__ ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(__magic_name__ ), "line_max": max(__magic_name__ )}
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = ["auto-generated", "autogenerated", "automatically generated"]
snake_case_ : Optional[Any] = example["content"].splitlines()
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 ,__magic_name__=0.05 )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = ["unit tests", "test file", "configuration file"]
snake_case_ : int = example["content"].splitlines()
snake_case_ : Optional[Any] = 0
snake_case_ : Any = 0
# first test
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
snake_case_ : Tuple = example["content"].count("\n" )
snake_case_ : int = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : List[Any] = ["def ", "class ", "for ", "while "]
snake_case_ : Optional[Any] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=4 )-> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = example["content"].splitlines()
snake_case_ : Tuple = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = tokenizer(example["content"] ,truncation=__magic_name__ )["input_ids"]
snake_case_ : int = len(example["content"] ) / len(__magic_name__ )
return {"ratio": ratio}
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {}
results.update(get_hash(__magic_name__ ) )
results.update(line_stats(__magic_name__ ) )
results.update(alpha_stats(__magic_name__ ) )
results.update(char_token_ratio(__magic_name__ ) )
results.update(is_autogenerated(__magic_name__ ) )
results.update(is_config_or_test(__magic_name__ ) )
results.update(has_no_keywords(__magic_name__ ) )
results.update(has_few_assignments(__magic_name__ ) )
return results
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if not check_uniques(__magic_name__ ,__magic_name__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
with open(__magic_name__ ,"rb" ) as f_in:
with gzip.open(str(__magic_name__ ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out:
shutil.copyfileobj(__magic_name__ ,__magic_name__ )
os.unlink(__magic_name__ )
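# Hedged usage sketch (path is hypothetical): compress a saved JSON shard in place.
# compress_file("data/file-000000000001.json") # writes the .gz sibling and removes the original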
# Settings
__lowerCamelCase : List[Any] = HfArgumentParser(PreprocessingArguments)
__lowerCamelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCamelCase : List[Any] = multiprocessing.cpu_count()
__lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__lowerCamelCase : Any = time.time()
__lowerCamelCase : str = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Any = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__lowerCamelCase : Any = set(ds.unique('''hash'''))
__lowerCamelCase : Optional[int] = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Tuple = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase , __lowerCamelCase : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__lowerCamelCase : List[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
__lowerCamelCase : List[str] = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
__lowerCamelCase : int = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__lowerCamelCase : Union[str, Any] = str(data_dir / f'''file-{file_number+1:012}.json''')
__lowerCamelCase : List[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 656 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase : str = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
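# Hedged usage note: with the _LazyModule registration above, heavy submodules are imported
# only on first attribute access, e.g. `from transformers import MegaModel`.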
| 703 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = torch.nn.Linear(10 , 10 )
snake_case_ : Dict = torch.optim.SGD(model.parameters() , 0.1 )
snake_case_ : Tuple = Accelerator()
snake_case_ : Optional[Any] = accelerator.prepare(lowerCAmelCase__ )
try:
pickle.loads(pickle.dumps(lowerCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
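        # Note: the pickle round trip above guards serializability of Accelerate's optimizer
        # wrapper, which checkpointing and state-saving utilities depend on.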
| 656 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : Dict = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
__lowerCamelCase : Tuple = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 704 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : Any = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__lowerCamelCase : Any = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = None
# source code of `config_class`
snake_case_ : List[Any] = inspect.getsource(__magic_name__ )
snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
snake_case_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ : str = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case_ : Dict = ckpt_name
break
return checkpoint
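# Illustrative check of the regex above:
# _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]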
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ )
snake_case_ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__magic_name__ )
if len(__magic_name__ ) > 0:
snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 656 | 0 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class A_ :
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]=sys.maxsize ) -> List[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = "bilinear"
snake_case_ : str = max_size
snake_case_ : Union[str, Any] = short_edge_length
def __call__( self :Tuple , lowerCAmelCase__ :Any ) -> int:
'''simple docstring'''
snake_case_ : Dict = []
for img in imgs:
snake_case_ : Dict = img.shape[:2]
# later: provide list and randomly choose index for resize
snake_case_ : List[str] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
snake_case_ : int = size * 1.0 / min(lowerCAmelCase__ , lowerCAmelCase__ )
if h < w:
snake_case_ : Optional[Any] = size, scale * w
else:
snake_case_ : int = scale * h, size
if max(lowerCAmelCase__ , lowerCAmelCase__ ) > self.max_size:
snake_case_ : str = self.max_size * 1.0 / max(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Any = newh * scale
snake_case_ : Any = neww * scale
snake_case_ : Dict = int(neww + 0.5 )
snake_case_ : List[str] = int(newh + 0.5 )
            if img.dtype == np.uint8:
snake_case_ : Dict = Image.fromarray(lowerCAmelCase__ )
snake_case_ : List[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
snake_case_ : List[Any] = np.asarray(lowerCAmelCase__ )
else:
                snake_case_ : List[Any] = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hwc -> nchw (add batch dimension)
snake_case_ : Optional[Any] = nn.functional.interpolate(
lowerCAmelCase__ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase__ ).squeeze(0 )
img_augs.append(lowerCAmelCase__ )
return img_augs
class A_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCAmelCase__ :Dict ) -> int:
'''simple docstring'''
snake_case_ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
snake_case_ : Optional[Any] = cfg.INPUT.FORMAT
snake_case_ : List[str] = cfg.SIZE_DIVISIBILITY
snake_case_ : Tuple = cfg.PAD_VALUE
snake_case_ : Tuple = cfg.INPUT.MAX_SIZE_TEST
snake_case_ : Any = cfg.MODEL.DEVICE
snake_case_ : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
snake_case_ : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
snake_case_ : List[Any] = lambda lowerCAmelCase__ : (x - self.pixel_mean) / self.pixel_std
def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = tuple(max(lowerCAmelCase__ ) for s in zip(*[img.shape for img in images] ) )
snake_case_ : List[Any] = [im.shape[-2:] for im in images]
snake_case_ : Any = [
nn.functional.pad(
lowerCAmelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return torch.stack(lowerCAmelCase__ ), torch.tensor(lowerCAmelCase__ )
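    # Note: pad() right/bottom-pads every image to the batch-wide max (H, W) with pad_value and
    # returns the original sizes so boxes can later be rescaled to the unpadded images.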
def __call__( self :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any]=False ) -> Optional[int]:
'''simple docstring'''
with torch.no_grad():
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = [images]
if single_image:
assert len(lowerCAmelCase__ ) == 1
for i in range(len(lowerCAmelCase__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase__ , images.pop(lowerCAmelCase__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase__ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
snake_case_ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] )
snake_case_ : Any = self.aug(lowerCAmelCase__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
snake_case_ : str = [self.normalizer(lowerCAmelCase__ ) for x in images]
# now pad them to do the following operations
snake_case_ : List[str] = self.pad(lowerCAmelCase__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
snake_case_ : List[Any] = torch.true_divide(lowerCAmelCase__ , lowerCAmelCase__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def __UpperCAmelCase ( boxes ,scale_yx )-> str:
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
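# Note: boxes are (x0, y0, x1, y1); even columns are x coordinates (scaled by the width factor
# scale_yx[:, 1]) and odd columns are y coordinates (scaled by the height factor scale_yx[:, 0]).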
def __UpperCAmelCase ( tensor ,box_size )-> Dict:
    """simple docstring"""
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 ,max=w )
    tensor[:, 1].clamp_(min=0 ,max=h )
    tensor[:, 2].clamp_(min=0 ,max=w )
    tensor[:, 3].clamp_(min=0 ,max=h )
| 705 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''cvt'''
def __init__( self :List[Any] , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Any=[7, 3, 3] , lowerCAmelCase__ :Dict=[4, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[2, 1, 1] , lowerCAmelCase__ :Any=[64, 192, 384] , lowerCAmelCase__ :List[str]=[1, 3, 6] , lowerCAmelCase__ :str=[1, 2, 10] , lowerCAmelCase__ :Any=[4.0, 4.0, 4.0] , lowerCAmelCase__ :int=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Optional[Any]=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Dict=[0.0, 0.0, 0.1] , lowerCAmelCase__ :List[Any]=[True, True, True] , lowerCAmelCase__ :List[Any]=[False, False, True] , lowerCAmelCase__ :Dict=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase__ :Any=[3, 3, 3] , lowerCAmelCase__ :Tuple=[1, 1, 1] , lowerCAmelCase__ :Optional[int]=[2, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[1, 1, 1] , lowerCAmelCase__ :Any=[1, 1, 1] , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Dict=1E-1_2 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : int = num_channels
snake_case_ : int = patch_sizes
snake_case_ : Optional[Any] = patch_stride
snake_case_ : Dict = patch_padding
snake_case_ : Tuple = embed_dim
snake_case_ : Optional[int] = num_heads
snake_case_ : Union[str, Any] = depth
snake_case_ : Optional[int] = mlp_ratio
snake_case_ : Tuple = attention_drop_rate
snake_case_ : str = drop_rate
snake_case_ : Tuple = drop_path_rate
snake_case_ : Any = qkv_bias
snake_case_ : Union[str, Any] = cls_token
snake_case_ : int = qkv_projection_method
snake_case_ : Any = kernel_qkv
snake_case_ : Union[str, Any] = padding_kv
snake_case_ : str = stride_kv
snake_case_ : Dict = padding_q
snake_case_ : Tuple = stride_q
snake_case_ : Any = initializer_range
snake_case_ : Any = layer_norm_eps
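        # Hedged usage sketch (upstream class name assumed): `CvtConfig()` reproduces the
        # CvT-13 defaults above, i.e. three stages with embed dims [64, 192, 384] and depths [1, 2, 10].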
| 656 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( start ,finish )-> None:
    """simple docstring"""
    snake_case_ : List[Any] = len(finish )
    print("The following activities are selected:" )
    # The first activity is always selected
    snake_case_ : str = 0
    print(snake_case_ ,end="," )
    # Consider rest of the activities
    for j in range(len(finish ) ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[snake_case_]:
            print(j ,end="," )
            snake_case_ : Optional[Any] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase : int = [1, 3, 0, 5, 8, 5]
__lowerCamelCase : Any = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
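# Worked example for the inputs above: with start = [1, 3, 0, 5, 8, 5] and
# finish = [2, 4, 6, 7, 9, 9] (already sorted by finish time), the greedy scan prints 0,1,3,4,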
| 706 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__lowerCamelCase : str = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCamelCase : Dict = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__lowerCamelCase : int = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _A ( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
snake_case_ : List[str] = TER(
normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , )
snake_case_ : Any = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 656 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__lowerCamelCase : str = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCamelCase : Dict = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__lowerCamelCase : int = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _A ( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
snake_case_ : List[str] = TER(
normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , )
snake_case_ : Any = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 707 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
snake_case_ : int = Dataset.from_dict(__magic_name__ )
return dataset
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_dataset()
snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = get_dataset()
snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
print(lowerCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
| 656 | 0 |
'''simple docstring'''
__lowerCamelCase : Dict = range(2, 20 + 1)
__lowerCamelCase : Optional[Any] = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase : dict[int, dict[int, list[list[int]]]] = {}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Dict = sum(a_i[j] for j in range(__magic_name__ ,len(__magic_name__ ) ) )
snake_case_ : Any = sum(a_i[j] * base[j] for j in range(min(len(__magic_name__ ) ,__magic_name__ ) ) )
snake_case_ : Any = 0, 0
snake_case_ : Union[str, Any] = n - i
snake_case_ : str = memo.get(__magic_name__ )
if sub_memo is not None:
snake_case_ : Tuple = sub_memo.get(__magic_name__ )
if jumps is not None and len(__magic_name__ ) > 0:
# find and make the largest jump without going over
snake_case_ : Any = -1
for _k in range(len(__magic_name__ ) - 1 ,-1 ,-1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
snake_case_ : Optional[int] = _k
break
if max_jump >= 0:
snake_case_ : int = jumps[max_jump]
# since the difference between jumps is cached, add c
snake_case_ : Any = diff + c
for j in range(min(__magic_name__ ,len(__magic_name__ ) ) ):
snake_case_ : Dict = divmod(__magic_name__ ,10 )
if new_c > 0:
add(__magic_name__ ,__magic_name__ ,__magic_name__ )
else:
snake_case_ : Tuple = []
else:
snake_case_ : Optional[Any] = {c: []}
snake_case_ : Dict = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
snake_case_ : Optional[int] = next_term(__magic_name__ ,k - 1 ,i + dn ,__magic_name__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
snake_case_ : str = compute(__magic_name__ ,__magic_name__ ,i + dn ,__magic_name__ )
diff += _diff
dn += terms_jumped
snake_case_ : Optional[Any] = sub_memo[c]
# keep jumps sorted by # of terms skipped
snake_case_ : Any = 0
while j < len(__magic_name__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__magic_name__ ,(diff, dn, k) )
return (diff, dn)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(__magic_name__ ):
a_i.extend([0 for _ in range(k - len(__magic_name__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
snake_case_ : int = i
snake_case_ : Optional[Any] = 0, 0, 0
for j in range(len(__magic_name__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
snake_case_ : str = ds_c + ds_b
diff += addend
snake_case_ : Union[str, Any] = 0
for j in range(__magic_name__ ):
snake_case_ : Optional[Any] = a_i[j] + addend
snake_case_ : Optional[Any] = divmod(__magic_name__ ,10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__magic_name__ ,__magic_name__ ,__magic_name__ )
return diff, i - start_i
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Union[str, Any]:
"""simple docstring"""
for j in range(__magic_name__ ,len(__magic_name__ ) ):
snake_case_ : Tuple = digits[j] + addend
if s >= 10:
snake_case_ : Optional[int] = divmod(__magic_name__ ,10 )
snake_case_ : str = addend // 10 + quotient
else:
snake_case_ : Optional[Any] = s
snake_case_ : Optional[Any] = addend // 10
if addend == 0:
break
while addend > 0:
snake_case_ : Optional[int] = divmod(__magic_name__ ,10 )
digits.append(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ = 10**15 )-> int:
"""simple docstring"""
snake_case_ : List[str] = [1]
snake_case_ : Optional[Any] = 1
snake_case_ : Any = 0
while True:
snake_case_ : Optional[Any] = next_term(__magic_name__ ,20 ,i + dn ,__magic_name__ )
dn += terms_jumped
if dn == n - i:
break
snake_case_ : Optional[Any] = 0
for j in range(len(__magic_name__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
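# Note: the sequence is a(n + 1) = a(n) + digitsum(a(n)); the memoized "jumps" above let the
# solver skip large runs of terms instead of iterating all 10**15 of them one at a time.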
| 708 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase : Dict = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656 | 0 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = VideoMAEConfig()
set_architecture_configs(__magic_name__ ,__magic_name__ )
if "finetuned" not in model_name:
snake_case_ : Union[str, Any] = False
if "finetuned" in model_name:
snake_case_ : str = "huggingface/label-files"
if "kinetics" in model_name:
snake_case_ : List[str] = 400
snake_case_ : int = "kinetics400-id2label.json"
elif "ssv2" in model_name:
snake_case_ : Dict = 174
snake_case_ : Union[str, Any] = "something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
snake_case_ : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : Optional[int] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : Optional[int] = idalabel
snake_case_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
if "small" in model_name:
snake_case_ : str = 384
snake_case_ : str = 1536
snake_case_ : str = 12
snake_case_ : Optional[int] = 16
snake_case_ : List[Any] = 12
snake_case_ : Optional[int] = 3
snake_case_ : Optional[int] = 192
snake_case_ : Any = 768
elif "large" in model_name:
snake_case_ : Any = 1024
snake_case_ : Union[str, Any] = 4096
snake_case_ : Optional[int] = 24
snake_case_ : str = 16
snake_case_ : Tuple = 12
snake_case_ : int = 8
snake_case_ : Optional[int] = 512
snake_case_ : Any = 2048
elif "huge" in model_name:
snake_case_ : Tuple = 1280
snake_case_ : Dict = 5120
snake_case_ : int = 32
snake_case_ : Optional[Any] = 16
snake_case_ : List[Any] = 12
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = 640
snake_case_ : List[str] = 2560
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
if "encoder." in name:
snake_case_ : Any = name.replace("encoder." ,"" )
if "cls_token" in name:
snake_case_ : Dict = name.replace("cls_token" ,"videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
snake_case_ : List[str] = name.replace("decoder_pos_embed" ,"decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
snake_case_ : List[Any] = name.replace("pos_embed" ,"videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
snake_case_ : str = name.replace("patch_embed.proj" ,"videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
snake_case_ : int = name.replace("patch_embed.norm" ,"videomae.embeddings.norm" )
if "decoder.blocks" in name:
snake_case_ : Any = name.replace("decoder.blocks" ,"decoder.decoder_layers" )
if "blocks" in name:
snake_case_ : Optional[int] = name.replace("blocks" ,"videomae.encoder.layer" )
if "attn.proj" in name:
snake_case_ : Union[str, Any] = name.replace("attn.proj" ,"attention.output.dense" )
if "attn" in name and "bias" not in name:
snake_case_ : List[Any] = name.replace("attn" ,"attention.self" )
if "attn" in name:
snake_case_ : Dict = name.replace("attn" ,"attention.attention" )
if "norm1" in name:
snake_case_ : List[Any] = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name:
snake_case_ : Union[str, Any] = name.replace("norm2" ,"layernorm_after" )
if "mlp.fc1" in name:
snake_case_ : int = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
snake_case_ : Any = name.replace("mlp.fc2" ,"output.dense" )
if "decoder_embed" in name:
snake_case_ : Optional[int] = name.replace("decoder_embed" ,"decoder.decoder_embed" )
if "decoder_norm" in name:
snake_case_ : str = name.replace("decoder_norm" ,"decoder.decoder_norm" )
if "decoder_pred" in name:
snake_case_ : List[Any] = name.replace("decoder_pred" ,"decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
snake_case_ : Union[str, Any] = name.replace("norm.weight" ,"videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
snake_case_ : str = name.replace("norm.bias" ,"videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
snake_case_ : Any = name.replace("head" ,"classifier" )
return name
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Any:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
snake_case_ : Optional[Any] = orig_state_dict.pop(__magic_name__ )
if key.startswith("encoder." ):
snake_case_ : Dict = key.replace("encoder." ,"" )
if "qkv" in key:
snake_case_ : List[Any] = key.split("." )
if key.startswith("decoder.blocks" ):
snake_case_ : Any = config.decoder_hidden_size
snake_case_ : Optional[int] = int(key_split[2] )
snake_case_ : Dict = "decoder.decoder_layers."
if "weight" in key:
snake_case_ : int = val[:dim, :]
snake_case_ : Union[str, Any] = val[dim : dim * 2, :]
snake_case_ : Dict = val[-dim:, :]
else:
snake_case_ : Tuple = config.hidden_size
snake_case_ : List[str] = int(key_split[1] )
snake_case_ : str = "videomae.encoder.layer."
if "weight" in key:
snake_case_ : List[str] = val[:dim, :]
snake_case_ : List[Any] = val[dim : dim * 2, :]
snake_case_ : Any = val[-dim:, :]
else:
snake_case_ : Any = val
return orig_state_dict
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Union[str, Any] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" ,filename="eating_spaghetti.npy" ,repo_type="dataset" )
snake_case_ : List[Any] = np.load(__magic_name__ )
return list(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Tuple = get_videomae_config(__magic_name__ )
if "finetuned" in model_name:
snake_case_ : Optional[int] = VideoMAEForVideoClassification(__magic_name__ )
else:
snake_case_ : int = VideoMAEForPreTraining(__magic_name__ )
# download original checkpoint, hosted on Google Drive
snake_case_ : Any = "pytorch_model.bin"
gdown.cached_download(__magic_name__ ,__magic_name__ ,quiet=__magic_name__ )
snake_case_ : List[Any] = torch.load(__magic_name__ ,map_location="cpu" )
if "model" in files:
snake_case_ : Union[str, Any] = files["model"]
else:
snake_case_ : int = files["module"]
snake_case_ : Optional[Any] = convert_state_dict(__magic_name__ ,__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
# verify model on basic input
snake_case_ : Union[str, Any] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
snake_case_ : List[str] = prepare_video()
snake_case_ : Union[str, Any] = image_processor(__magic_name__ ,return_tensors="pt" )
if "finetuned" not in model_name:
snake_case_ : Any = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" ,filename="bool_masked_pos.pt" )
snake_case_ : Dict = torch.load(__magic_name__ )
snake_case_ : Any = model(**__magic_name__ )
snake_case_ : Tuple = outputs.logits
snake_case_ : Tuple = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case_ : Any = torch.Size([1, 400] )
snake_case_ : List[Any] = torch.tensor([-0.9_291, -0.4_061, -0.9_307] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case_ : List[Any] = torch.Size([1, 174] )
snake_case_ : Union[str, Any] = torch.tensor([0.2_671, -0.4_689, -0.8_235] )
elif model_name == "videomae-base":
snake_case_ : Dict = torch.Size([1, 1408, 1536] )
snake_case_ : Dict = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]] )
elif model_name == "videomae-base-short":
snake_case_ : Union[str, Any] = torch.Size([1, 1408, 1536] )
snake_case_ : Optional[int] = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case_ : List[str] = torch.tensor([0.5_142] ) if config.norm_pix_loss else torch.tensor([0.6_469] )
elif model_name == "videomae-large":
snake_case_ : Any = torch.Size([1, 1408, 1536] )
snake_case_ : List[str] = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case_ : List[Any] = torch.Size([1, 400] )
snake_case_ : Optional[int] = torch.tensor([0.0_771, 0.0_011, -0.3_625] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case_ : List[str] = torch.Size([1, 400] )
snake_case_ : Union[str, Any] = torch.tensor([0.2_433, 0.1_632, -0.4_894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case_ : List[Any] = torch.Size([1, 400] )
snake_case_ : int = torch.tensor([0.6_588, 0.0_990, -0.2_493] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case_ : Dict = torch.Size([1, 400] )
snake_case_ : str = torch.tensor([0.3_669, -0.0_688, -0.2_421] )
elif model_name == "videomae-base-short-ssv2":
snake_case_ : Optional[Any] = torch.Size([1, 1408, 1536] )
snake_case_ : Optional[int] = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case_ : Dict = torch.Size([1, 174] )
snake_case_ : str = torch.tensor([-0.0_537, -0.1_539, -0.3_266] )
elif model_name == "videomae-base-ssv2":
snake_case_ : Union[str, Any] = torch.Size([1, 1408, 1536] )
snake_case_ : List[str] = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case_ : int = torch.Size([1, 174] )
snake_case_ : Any = torch.tensor([0.1_961, -0.8_337, -0.6_389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] ,__magic_name__ ,atol=1E-4 )
else:
print("Logits:" ,logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] ,__magic_name__ ,atol=1E-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
snake_case_ : Tuple = outputs.loss
assert torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__magic_name__ )
model.save_pretrained(__magic_name__ )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(__magic_name__ ,organization="nielsr" )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowerCamelCase : Any = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
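# Hedged CLI sketch (script name is hypothetical):
# python convert_videomae_to_pytorch.py --model_name videomae-base \
#     --pytorch_dump_folder_path ./videomae-base --push_to_hub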
| 709 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> list[int]:
"""simple docstring"""
    if __magic_name__ <= 0 or not isinstance(__magic_name__ ,int ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(__magic_name__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
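# Sanity check: hexagonal_numbers(5) -> [0, 1, 6, 15, 28], since h(n) = n * (2 * n - 1).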
| 656 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__lowerCamelCase : Union[str, Any] = 250004
__lowerCamelCase : Dict = 250020
@require_sentencepiece
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MBartTokenizer
a__ = MBartTokenizerFast
a__ = True
a__ = True
def _A ( self :Dict ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : int = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :List[str] ) -> int:
'''simple docstring'''
snake_case_ : Tuple = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
snake_case_ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case_ : Optional[int] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def _A ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case_ : Optional[int] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : List[str] = tempfile.mkdtemp()
snake_case_ : Optional[Any] = tokenizer_r.save_pretrained(lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
snake_case_ : List[Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
snake_case_ : Optional[int] = tokenizer_r.from_pretrained(lowerCAmelCase__ )
snake_case_ : int = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : Any = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer_p.save_pretrained(lowerCAmelCase__ )
                # Checks it saves with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
snake_case_ : Optional[int] = tokenizer_r.from_pretrained(lowerCAmelCase__ )
snake_case_ : str = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : Tuple = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
snake_case_ : Any = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ : Optional[Any] = tokenizer_r.from_pretrained(lowerCAmelCase__ )
snake_case_ : Any = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = '''facebook/mbart-large-en-ro'''
a__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
a__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
a__ = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def _A ( cls :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
snake_case_ : int = 1
return cls
def _A ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250_020 )
def _A ( self :Any ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
snake_case_ : int = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
snake_case_ : Any = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , lowerCAmelCase__ )
snake_case_ : Any = 10
snake_case_ : str = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :str ) -> str:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250_026, 250_001] )
def _A ( self :Tuple ) -> int:
'''simple docstring'''
snake_case_ : List[str] = tempfile.mkdtemp()
snake_case_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
def _A ( self :str ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Optional[Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
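        # A minimal sketch of the MBart-style shift assumed above (token values illustrative):
        # the last non-pad token (the target language code) rotates to position 0 and every
        # other token moves one slot right, e.g. labels [a, b, eos, ro_RO] become
        # decoder_input_ids [ro_RO, a, b, eos], matching the asserts above.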
@require_torch
def _A ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
snake_case_ : str = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
snake_case_ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _A ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors="pt" )
snake_case_ : Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors="pt" )
snake_case_ : Optional[Any] = targets["input_ids"]
snake_case_ : int = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _A ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
"input_ids": [[62, 3_034, 2, 250_004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250_001,
} , )
| 710 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __magic_name__=None )-> List[str]:
"""simple docstring"""
if subparsers is not None:
snake_case_ : List[str] = subparsers.add_parser("test" )
else:
snake_case_ : List[Any] = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" ,default=__magic_name__ ,help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) ,)
if subparsers is not None:
parser.set_defaults(func=__magic_name__ )
return parser
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
snake_case_ : str = script_name
else:
snake_case_ : Any = F'''--config_file={args.config_file} {script_name}'''
snake_case_ : Union[str, Any] = ["accelerate-launch"] + test_args.split()
snake_case_ : Optional[int] = execute_subprocess_async(__magic_name__ ,env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Dict = test_command_parser()
snake_case_ : Dict = parser.parse_args()
test_command(__magic_name__ )
if __name__ == "__main__":
main()
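# CLI usage sketch (the config path is illustrative):
#   accelerate test --config_file my_config.yaml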
| 656 | 0 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def __UpperCAmelCase ( __magic_name__ = 1_500_000 )-> int:
"""simple docstring"""
snake_case_ : defaultdict = defaultdict(__magic_name__ )
snake_case_ : Tuple = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 ,__magic_name__ ,2 ):
if gcd(__magic_name__ ,__magic_name__ ) > 1:
continue
snake_case_ : Optional[int] = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__magic_name__ ,limit + 1 ,__magic_name__ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
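# Sanity-check sketch (illustrative, not part of the solution): Euclid's formula yields
# a primitive triple (m^2 - n^2, 2mn, m^2 + n^2) with perimeter 2m(m + n) for coprime
# m > n of opposite parity, e.g. m=2, n=1 -> (3, 4, 5) with perimeter 12 = 2*2*(2+1).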
if __name__ == "__main__":
print(f'''{solution() = }''')
| 711 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
__lowerCamelCase : str = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
__lowerCamelCase : int = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
__lowerCamelCase : List[str] = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=False ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 656 | 0 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
__lowerCamelCase : str = getLogger(__name__)
__lowerCamelCase : Any = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = 8 ,__magic_name__ = DEFAULT_DEVICE ,__magic_name__=False ,__magic_name__="summarization" ,__magic_name__=None ,**__magic_name__ ,)-> Dict:
"""simple docstring"""
snake_case_ : int = Path(__magic_name__ ).open("w" ,encoding="utf-8" )
snake_case_ : Union[str, Any] = str(__magic_name__ )
snake_case_ : Any = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).to(__magic_name__ )
if fpaa:
snake_case_ : Union[str, Any] = model.half()
snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(__magic_name__ )
logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
snake_case_ : int = time.time()
# update config with task specific params
use_task_specific_params(__magic_name__ ,__magic_name__ )
if prefix is None:
snake_case_ : Tuple = prefix or getattr(model.config ,"prefix" ,"" ) or ""
for examples_chunk in tqdm(list(chunks(__magic_name__ ,__magic_name__ ) ) ):
snake_case_ : List[Any] = [prefix + text for text in examples_chunk]
snake_case_ : Tuple = tokenizer(__magic_name__ ,return_tensors="pt" ,truncation=__magic_name__ ,padding="longest" ).to(__magic_name__ )
snake_case_ : Dict = model.generate(
input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**__magic_name__ ,)
snake_case_ : int = tokenizer.batch_decode(__magic_name__ ,skip_special_tokens=__magic_name__ ,clean_up_tokenization_spaces=__magic_name__ )
for hypothesis in dec:
fout.write(hypothesis + "\n" )
fout.flush()
fout.close()
snake_case_ : Any = int(time.time() - start_time ) # seconds
snake_case_ : Optional[Any] = len(__magic_name__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}
def __UpperCAmelCase ( )-> Optional[int]:
"""simple docstring"""
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )
def __UpperCAmelCase ( __magic_name__=True )-> Dict:
"""simple docstring"""
snake_case_ : str = argparse.ArgumentParser()
parser.add_argument("model_name" ,type=__magic_name__ ,help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("input_path" ,type=__magic_name__ ,help="like cnn_dm/test.source" )
parser.add_argument("save_path" ,type=__magic_name__ ,help="where to save summaries" )
parser.add_argument("--reference_path" ,type=__magic_name__ ,required=__magic_name__ ,help="like cnn_dm/test.target" )
parser.add_argument("--score_path" ,type=__magic_name__ ,required=__magic_name__ ,default="metrics.json" ,help="where to save metrics" )
parser.add_argument("--device" ,type=__magic_name__ ,required=__magic_name__ ,default=__magic_name__ ,help="cuda, cuda:1, cpu etc." )
parser.add_argument(
"--prefix" ,type=__magic_name__ ,required=__magic_name__ ,default=__magic_name__ ,help="will be added to the begininng of src examples" )
parser.add_argument("--task" ,type=__magic_name__ ,default="summarization" ,help="used for task_specific_params + metrics" )
parser.add_argument("--bs" ,type=__magic_name__ ,default=8 ,required=__magic_name__ ,help="batch size" )
parser.add_argument(
"--n_obs" ,type=__magic_name__ ,default=-1 ,required=__magic_name__ ,help="How many observations. Defaults to all." )
parser.add_argument("--fp16" ,action="store_true" )
parser.add_argument("--dump-args" ,action="store_true" ,help="print the custom hparams with the results" )
parser.add_argument(
"--info" ,nargs="?" ,type=__magic_name__ ,const=datetime_now() ,help=(
"use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
" lang=en-ru. If no value is passed, the current datetime string will be used."
) ,)
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
snake_case_ : List[Any] = parser.parse_known_args()
snake_case_ : List[Any] = parse_numeric_n_bool_cl_kwargs(__magic_name__ )
if parsed_args and verbose:
print(F'''parsed the following generate kwargs: {parsed_args}''' )
snake_case_ : Dict = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
snake_case_ : Dict = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=__magic_name__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("Can't mix --fp16 and --device cpu" )
snake_case_ : int = generate_summaries_or_translations(
__magic_name__ ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fpaa=args.fpaa ,task=args.task ,prefix=args.prefix ,**__magic_name__ ,)
if args.reference_path is None:
return {}
# Compute scores
snake_case_ : Dict = calculate_bleu if "translation" in args.task else calculate_rouge
snake_case_ : str = [x.rstrip() for x in open(args.save_path ).readlines()]
snake_case_ : List[str] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(__magic_name__ )]
snake_case_ : dict = score_fn(__magic_name__ ,__magic_name__ )
scores.update(__magic_name__ )
if args.dump_args:
scores.update(__magic_name__ )
if args.info:
snake_case_ : Union[str, Any] = args.info
if verbose:
print(__magic_name__ )
if args.score_path is not None:
json.dump(__magic_name__ ,open(args.score_path ,"w" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 712 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCamelCase : str = 128022
__lowerCamelCase : List[Any] = 128028
@require_sentencepiece
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MaMaaaTokenizer
a__ = False
a__ = False
a__ = True
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Optional[int] = Path(self.tmpdirname )
save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = "</s>"
snake_case_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
snake_case_ : int = self.get_tokenizer()
snake_case_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , "This is a test" )
@slow
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = '''facebook/m2m100_418M'''
a__ = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
a__ = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def _A ( cls :str ) -> int:
'''simple docstring'''
snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
snake_case_ : List[str] = 1
return cls
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = "en"
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : int = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ )
@require_torch
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = "en"
snake_case_ : Tuple = "fr"
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Dict = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case_ : str = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
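        # Illustrative M2M100-style shift (assumed behavior, token values illustrative):
        # decoder_start_token_id (here eos == 2) is prepended and the last position is
        # dropped, e.g. labels [fr_XX, a, b, eos] -> decoder_input_ids [2, fr_XX, a, b].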
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case_ : int = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _A ( self :str ) -> int:
'''simple docstring'''
snake_case_ : Dict = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case_ : Tuple = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
| 656 | 0 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
__lowerCamelCase : int = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
__lowerCamelCase : List[Any] = parser.parse_args()
if args.check_lib:
__lowerCamelCase : Union[str, Any] = importlib.import_module('''transformers''')
__lowerCamelCase : int = Path(transformers_module.__file__).parent
else:
__lowerCamelCase : Optional[int] = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 713 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__lowerCamelCase : str = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowerCamelCase : Tuple = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Tuple = SavedModel()
snake_case_ : Dict = []
with open(os.path.join(__magic_name__ ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f:
snake_case_ : Dict = json.load(__magic_name__ )["opsets"]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__magic_name__ )] )
with open(__magic_name__ ,"rb" ) as f:
saved_model.ParseFromString(f.read() )
snake_case_ : Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
snake_case_ : str = sorted(__magic_name__ )
snake_case_ : Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__magic_name__ )
if strict and len(__magic_name__ ) > 0:
        raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops) )
elif len(__magic_name__ ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*__magic_name__ ,sep="\n" )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
__lowerCamelCase : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 656 | 0 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__lowerCamelCase : Tuple = '''src/transformers'''
__lowerCamelCase : Any = '''docs/source/en'''
__lowerCamelCase : str = '''.'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
with open(__magic_name__ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
snake_case_ : Optional[Any] = f.readlines()
# Find the start prompt.
snake_case_ : Optional[Any] = 0
while not lines[start_index].startswith(__magic_name__ ):
start_index += 1
start_index += 1
snake_case_ : List[str] = start_index
while not lines[end_index].startswith(__magic_name__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__lowerCamelCase : str = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
__lowerCamelCase : Tuple = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
__lowerCamelCase : List[Any] = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__lowerCamelCase : Dict = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,__magic_name__ )
return [m.group(0 ) for m in matches]
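# e.g. camel_case_split("TFBertModel") would return ["TF", "Bert", "Model"] (illustrative).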
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[int]:
"""simple docstring"""
snake_case_ : Union[str, Any] = 2 if text == "✅" or text == "❌" else len(__magic_name__ )
snake_case_ : List[Any] = (width - text_length) // 2
snake_case_ : str = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : Any = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
snake_case_ : int = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
snake_case_ : int = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
snake_case_ : Union[str, Any] = collections.defaultdict(__magic_name__ )
snake_case_ : List[Any] = collections.defaultdict(__magic_name__ )
snake_case_ : Dict = collections.defaultdict(__magic_name__ )
snake_case_ : List[str] = collections.defaultdict(__magic_name__ )
snake_case_ : Optional[Any] = collections.defaultdict(__magic_name__ )
    # Let's look through all transformers objects (once).
for attr_name in dir(__magic_name__ ):
snake_case_ : Optional[Any] = None
if attr_name.endswith("Tokenizer" ):
snake_case_ : Any = slow_tokenizers
snake_case_ : Union[str, Any] = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
snake_case_ : List[Any] = fast_tokenizers
snake_case_ : Optional[Any] = attr_name[:-13]
elif _re_tf_models.match(__magic_name__ ) is not None:
snake_case_ : str = tf_models
snake_case_ : List[Any] = _re_tf_models.match(__magic_name__ ).groups()[0]
elif _re_flax_models.match(__magic_name__ ) is not None:
snake_case_ : int = flax_models
snake_case_ : Tuple = _re_flax_models.match(__magic_name__ ).groups()[0]
elif _re_pt_models.match(__magic_name__ ) is not None:
snake_case_ : List[Any] = pt_models
snake_case_ : Union[str, Any] = _re_pt_models.match(__magic_name__ ).groups()[0]
if lookup_dict is not None:
while len(__magic_name__ ) > 0:
if attr_name in model_name_to_prefix.values():
snake_case_ : Union[str, Any] = True
break
# Try again after removing the last word in the name
snake_case_ : Any = "".join(camel_case_split(__magic_name__ )[:-1] )
# Let's build that table!
snake_case_ : List[Any] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
snake_case_ : List[Any] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
snake_case_ : List[Any] = [len(__magic_name__ ) + 2 for c in columns]
snake_case_ : Optional[Any] = max([len(__magic_name__ ) for name in model_names] ) + 2
# Build the table per se
snake_case_ : Optional[Any] = "|" + "|".join([_center_text(__magic_name__ ,__magic_name__ ) for c, w in zip(__magic_name__ ,__magic_name__ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
snake_case_ : Optional[Any] = {True: "✅", False: "❌"}
for name in model_names:
snake_case_ : List[Any] = model_name_to_prefix[name]
snake_case_ : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(__magic_name__ ,__magic_name__ ) for l, w in zip(__magic_name__ ,__magic_name__ )] ) + "|\n"
return table
def __UpperCAmelCase ( __magic_name__=False )-> Tuple:
"""simple docstring"""
snake_case_ : List[Any] = _find_text_in_file(
filename=os.path.join(__magic_name__ ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
snake_case_ : List[Any] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(__magic_name__ ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__lowerCamelCase : List[str] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 714 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
__lowerCamelCase : List[str] = ['''names''', '''prefix''']
__lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines''']
__lowerCamelCase : Optional[Any] = ['''date_format''']
@dataclass
class A_ (datasets.BuilderConfig ):
"""simple docstring"""
a__ = ","
a__ = None
a__ = "infer"
a__ = None
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = None
a__ = None
a__ = None
a__ = None
a__ = False
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = False
a__ = True
a__ = None
a__ = "."
a__ = None
a__ = '"'
a__ = 0
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = 0
a__ = True
a__ = False
a__ = None
a__ = 10000
a__ = None
a__ = "strict"
a__ = "error"
a__ = None
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
if self.delimiter is not None:
snake_case_ : Tuple = self.delimiter
if self.column_names is not None:
snake_case_ : List[Any] = self.column_names
@property
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we also skip them when they are left at their default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
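    # Illustrative: CsvConfig(delimiter=";").pd_read_csv_kwargs would yield {"sep": ";", ...}
    # with deprecated/unsupported pandas keys dropped for the installed pandas version.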
class A_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
a__ = CsvConfig
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
snake_case_ : int = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : List[str] = [files]
snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
snake_case_ : str = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : str = [files]
snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
snake_case_ : int = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
snake_case_ : str = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' )
raise
| 656 | 0 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCamelCase : str = logging.get_logger(__name__)
class A_ (a_ ):
"""simple docstring"""
a__ = ['''input_features''', '''attention_mask''']
def __init__( self :Optional[Any] , lowerCAmelCase__ :str=80 , lowerCAmelCase__ :Optional[Any]=16_000 , lowerCAmelCase__ :List[str]=80 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :Union[str, Any]=True , **lowerCAmelCase__ :Tuple , ) -> List[str]:
'''simple docstring'''
super().__init__(feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : List[str] = num_mel_bins
snake_case_ : Any = do_ceptral_normalize
snake_case_ : Tuple = normalize_means
snake_case_ : Optional[int] = normalize_vars
snake_case_ : Dict = True
def _A ( self :List[Any] , lowerCAmelCase__ :np.ndarray , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Optional[int] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
snake_case_ : Dict = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 )
snake_case_ : Union[str, Any] = ta_kaldi.fbank(lowerCAmelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
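        # Note: ta_kaldi.fbank expects a (channels, time) float tensor scaled like 16-bit
        # PCM (hence the 2**15 multiplier above) and returns (num_frames, num_mel_bins).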
@staticmethod
def _A ( lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[bool] = True , lowerCAmelCase__ :Optional[bool] = True , lowerCAmelCase__ :float = 0.0 , ) -> np.ndarray:
'''simple docstring'''
if normalize_means:
snake_case_ : int = x[:input_length].mean(axis=0 )
snake_case_ : List[str] = np.subtract(lowerCAmelCase__ , lowerCAmelCase__ )
if normalize_vars:
snake_case_ : Union[str, Any] = x[:input_length].std(axis=0 )
snake_case_ : int = np.divide(lowerCAmelCase__ , lowerCAmelCase__ )
if input_length < x.shape[0]:
snake_case_ : Union[str, Any] = padding_value
# make sure array is in float32
snake_case_ : Union[str, Any] = x.astype(np.floataa )
return x
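        # Illustrative effect: for x with input_length == len(x), the returned features have
        # roughly zero mean and unit variance per mel bin; positions past input_length are
        # overwritten with padding_value before the float32 cast.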
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[np.ndarray] , lowerCAmelCase__ :Optional[np.ndarray] = None ) -> List[np.ndarray]:
'''simple docstring'''
snake_case_ : int = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(lowerCAmelCase__ , lowerCAmelCase__ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
def __call__( self :Union[str, Any] , lowerCAmelCase__ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase__ :Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[bool] = None , **lowerCAmelCase__ :Optional[int] , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
snake_case_ : Optional[int] = isinstance(lowerCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
snake_case_ : Union[str, Any] = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case_ : List[Any] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
snake_case_ : Optional[int] = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case_ : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case_ : Tuple = [raw_speech]
# extract fbank features
snake_case_ : int = [self._extract_fbank_features(lowerCAmelCase__ ) for waveform in raw_speech]
# convert into correct format for padding
snake_case_ : int = BatchFeature({"input_features": features} )
snake_case_ : Optional[int] = self.pad(
lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
# make sure list is in array format
snake_case_ : str = padded_inputs.get("input_features" )
if isinstance(input_features[0] , lowerCAmelCase__ ):
snake_case_ : Tuple = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in input_features]
snake_case_ : Any = padded_inputs.get("attention_mask" )
if attention_mask is not None:
snake_case_ : str = [np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
snake_case_ : int = (
np.array(lowerCAmelCase__ , dtype=np.intaa )
if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
snake_case_ : Dict = self.normalize(
padded_inputs["input_features"] , attention_mask=lowerCAmelCase__ )
if return_tensors is not None:
snake_case_ : List[str] = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
return padded_inputs
| 715 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MgpstrTokenizer
a__ = False
a__ = {}
a__ = False
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = "tester"
snake_case_ : Tuple = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _A ( self :Dict ) -> str:
'''simple docstring'''
pass
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _A ( self :int ) -> Dict:
'''simple docstring'''
pass
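# A minimal usage sketch of the tokenizer under test (the checkpoint id is
# the published MGP-STR base model, an assumption for illustration):
def _mgp_str_tokenizer_demo():
    tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
    ids = tok("tester")["input_ids"]
    return ids, tok.decode(ids)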
| 656 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
snake_case_ : str = str(bin(__magic_name__ ) )
binary_number += "0" * shift_amount
return binary_number
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
snake_case_ : int = str(bin(__magic_name__ ) )[2:]
if shift_amount >= len(__magic_name__ ):
return "0b0"
snake_case_ : Dict = binary_number[: len(__magic_name__ ) - shift_amount]
return "0b" + shifted_binary_number
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
snake_case_ : Union[str, Any] = "0" + str(bin(__magic_name__ ) ).strip("-" )[2:]
else: # Get binary (2's complement) representation of negative number
snake_case_ : Union[str, Any] = len(bin(__magic_name__ )[3:] ) # Find 2's complement of number
snake_case_ : Any = bin(abs(__magic_name__ ) - (1 << binary_number_length) )[3:]
snake_case_ : int = (
"1" + "0" * (binary_number_length - len(__magic_name__ )) + binary_number
)
if shift_amount >= len(__magic_name__ ):
return "0b" + binary_number[0] * len(__magic_name__ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__magic_name__ ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
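# Sanity check against Python's built-in operators (a sketch): << matches the
# logical left shift, and >> on ints is already an arithmetic right shift, so
# e.g. -9 >> 2 == -3 keeps the sign just like the string-based helper above.
def _shift_demo():
    return [(n << 2, n >> 2) for n in (9, -9)]  # [(36, 2), (-36, -3)]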
| 716 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__magic_name__ ,__magic_name__ ) ) )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list[list[list[float] | float]]:
"""simple docstring"""
if dataset.ndim != value_array.ndim:
snake_case_ : int = (
"Wrong input data's dimensions... "
F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(__magic_name__ )
try:
if dataset.shape[1] != value_array.shape[1]:
snake_case_ : Dict = (
"Wrong input data's shape... "
F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(__magic_name__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
snake_case_ : Dict = (
"Input data have different datatype... "
F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(__magic_name__ )
snake_case_ : Optional[int] = []
for value in value_array:
snake_case_ : List[str] = euclidean(__magic_name__ ,dataset[0] )
snake_case_ : int = dataset[0].tolist()
for dataset_value in dataset[1:]:
snake_case_ : Optional[Any] = euclidean(__magic_name__ ,__magic_name__ )
if dist > temp_dist:
snake_case_ : Tuple = temp_dist
snake_case_ : Optional[int] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return np.dot(__magic_name__ ,__magic_name__ ) / (norm(__magic_name__ ) * norm(__magic_name__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
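# An equivalent vectorised nearest-neighbour lookup with NumPy broadcasting
# (a sketch for comparison, not part of the module's API above):
def _nearest_neighbour_demo() -> np.ndarray:
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    queries = np.array([[0.1, 0.1], [1.9, 1.8]])
    dists = np.linalg.norm(dataset[None, :, :] - queries[:, None, :], axis=-1)
    return dataset[dists.argmin(axis=1)]  # nearest dataset row per query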
| 656 | 0 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
__lowerCamelCase : Tuple = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
__lowerCamelCase : Tuple = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
__lowerCamelCase : str = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def _A ( self :int ) -> Any:
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def _A ( self :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :str="uniform_average" , lowerCAmelCase__ :Union[str, Any]=True ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = mean_squared_error(
lowerCAmelCase__ , lowerCAmelCase__ , sample_weight=lowerCAmelCase__ , multioutput=lowerCAmelCase__ , squared=lowerCAmelCase__ )
return {"mse": mse}
| 717 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=None ,**__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : int = [x.strip() for x in open(__magic_name__ ).readlines()]
snake_case_ : Optional[int] = [x.strip() for x in open(__magic_name__ ).readlines()][: len(__magic_name__ )]
snake_case_ : List[Any] = calculate_rouge(__magic_name__ ,__magic_name__ ,**__magic_name__ )
if save_path is not None:
save_json(__magic_name__ ,__magic_name__ ,indent=__magic_name__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 656 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__magic_name__ ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 ,node_index * 2 ,__magic_name__ ,__magic_name__ ,__magic_name__ ) ,minimax(depth + 1 ,node_index * 2 + 1 ,__magic_name__ ,__magic_name__ ,__magic_name__ ) ,)
return min(
minimax(depth + 1 ,node_index * 2 ,__magic_name__ ,__magic_name__ ,__magic_name__ ) ,minimax(depth + 1 ,node_index * 2 + 1 ,__magic_name__ ,__magic_name__ ,__magic_name__ ) ,)
def __UpperCAmelCase ( )-> None:
"""simple docstring"""
snake_case_ : Tuple = [90, 23, 6, 33, 21, 65, 123, 3_4423]
snake_case_ : Optional[int] = math.log(len(__magic_name__ ) ,2 )
print("Optimal value : " ,end="" )
print(minimax(0 ,0 ,__magic_name__ ,__magic_name__ ,__magic_name__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
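# The same recursion with explicit names, plus the worked tree for the demo
# scores (a sketch): leaves (90,23)(6,33)(21,65)(123,34423) -> max level
# [90, 33, 65, 34423] -> min level [33, 65] -> root max = 65.
def minimax_named(depth: int, node_index: int, is_max: bool, scores: list, height: float) -> int:
    if depth == height:
        return scores[node_index]
    left = minimax_named(depth + 1, node_index * 2, not is_max, scores, height)
    right = minimax_named(depth + 1, node_index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)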
| 718 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[Any] = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case_ : Optional[Any] = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" )
snake_case_ : int = value
else:
snake_case_ : int = value
return new_state_dict
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = ""
if is_panoptic:
snake_case_ : Dict = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case_ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
snake_case_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
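        # (the fused in_proj has shape (3 * hidden_size, hidden_size) with
        # hidden_size = 256 here, so rows 0:256, 256:512 and 512:768 carry the
        # query, key and value weights respectively)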
snake_case_ : Tuple = in_proj_weight[:256, :]
snake_case_ : List[Any] = in_proj_bias[:256]
snake_case_ : Optional[Any] = in_proj_weight[256:512, :]
snake_case_ : Optional[int] = in_proj_bias[256:512]
snake_case_ : Optional[int] = in_proj_weight[-256:, :]
snake_case_ : str = in_proj_bias[-256:]
def __UpperCAmelCase ( )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ : Optional[Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case_ : Optional[Any] = "resnet101"
if "dc5" in model_name:
snake_case_ : List[str] = True
snake_case_ : Tuple = "panoptic" in model_name
if is_panoptic:
snake_case_ : List[Any] = 250
else:
snake_case_ : Optional[Any] = 91
snake_case_ : Optional[int] = "huggingface/label-files"
snake_case_ : Dict = "coco-detection-id2label.json"
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : Optional[int] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : int = idalabel
snake_case_ : Dict = {v: k for k, v in idalabel.items()}
# load image processor
snake_case_ : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection"
snake_case_ : str = ConditionalDetrImageProcessor(format=__magic_name__ )
# prepare image
snake_case_ : str = prepare_img()
snake_case_ : int = image_processor(images=__magic_name__ ,return_tensors="pt" )
snake_case_ : Union[str, Any] = encoding["pixel_values"]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
snake_case_ : Union[str, Any] = torch.hub.load("DeppMeng/ConditionalDETR" ,__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case_ : Any = "conditional_detr." + src
rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Tuple = rename_backbone_keys(__magic_name__ )
# query, key and value matrices need special treatment
read_in_q_k_v(__magic_name__ ,is_panoptic=__magic_name__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case_ : int = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
snake_case_ : Any = state_dict.pop(__magic_name__ )
snake_case_ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case_ : Tuple = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
snake_case_ : Union[str, Any] = state_dict.pop(__magic_name__ )
snake_case_ : List[Any] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
snake_case_ : Any = state_dict.pop(__magic_name__ )
snake_case_ : List[Any] = val
# finally, create HuggingFace model and load state dict
snake_case_ : Optional[int] = ConditionalDetrForSegmentation(__magic_name__ ) if is_panoptic else ConditionalDetrForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
model.push_to_hub(repo_id=__magic_name__ ,organization="DepuMeng" ,commit_message="Add model" )
# verify our conversion
snake_case_ : Dict = conditional_detr(__magic_name__ )
snake_case_ : Union[str, Any] = model(__magic_name__ )
assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-4 )
assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__lowerCamelCase : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
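    # Example invocation (a sketch; the script filename and output path are
    # placeholders):
    #   python convert_conditional_detr_checkpoint.py \
    #       --model_name conditional_detr_resnet50 \
    #       --pytorch_dump_folder_path ./conditional-detr-resnet-50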
| 656 | 0 |
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
for ch in input_str:
snake_case_ : int = ord(__magic_name__ )
snake_case_ : Tuple = pow(2 ,__magic_name__ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
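# The same idea with explicit names (a sketch): set bit ord(ch) in an
# unbounded-int bitmap; a repeated character is detected when its bit is
# already on.
def all_chars_unique(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        bit = 1 << ord(ch)
        if bitmap & bit:
            return False
        bitmap |= bit
    return True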
| 719 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = 1
snake_case_ : Dict = 3
snake_case_ : Union[str, Any] = (32, 32)
snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _A ( self :Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = torch.ones([0] )
def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : str = self.dummy_cond_unet
snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : Dict = self.dummy_vae
snake_case_ : Dict = self.dummy_text_encoder
snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : str = 77
snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
snake_case_ : Tuple = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = "A painting of a squirrel eating a burger"
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Dict = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
snake_case_ : Any = output.images
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Optional[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.dummy_cond_unet
snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : int = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : int = 77
snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ : Optional[Any] = unet.half()
snake_case_ : Tuple = vae.half()
snake_case_ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : str = init_image.resize((760, 504) )
snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images[0]
snake_case_ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : List[Any] = init_image.resize((768, 512) )
snake_case_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
snake_case_ : Any = "BAAI/AltDiffusion"
snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
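# A condensed end-user sketch of the pipeline exercised above (mirrors the
# slow test; needs a GPU and network access to the "BAAI/AltDiffusion"
# weights, and uses the import at the top of this file):
def _alt_diffusion_img2img_demo():
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg" ).resize((768, 512))
    pipe = AltDiffusionImgaImgPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
    return pipe(
        prompt="A fantasy landscape, trending on artstation", image=init_image, strength=0.75, guidance_scale=7.5
    ).images[0]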
| 656 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_ (a_ ):
"""simple docstring"""
a__ = 42
a__ = 42
def __init__( self :int , lowerCAmelCase__ :UNetaDModel , lowerCAmelCase__ :KarrasVeScheduler ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
@torch.no_grad()
def __call__( self :List[str] , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :int = 50 , lowerCAmelCase__ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase__ :Optional[str] = "pil" , lowerCAmelCase__ :bool = True , **lowerCAmelCase__ :Optional[Any] , ) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.unet.config.sample_size
snake_case_ : Optional[int] = (batch_size, 3, img_size, img_size)
snake_case_ : str = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
snake_case_ : str = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
snake_case_ : List[str] = self.scheduler.schedule[t]
snake_case_ : Union[str, Any] = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
snake_case_ : Union[str, Any] = self.scheduler.add_noise_to_input(lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
snake_case_ : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
snake_case_ : str = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
snake_case_ : str = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
snake_case_ : List[str] = self.scheduler.step_correct(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , step_output.prev_sample , step_output["derivative"] , )
snake_case_ : Tuple = step_output.prev_sample
snake_case_ : Dict = (sample / 2 + 0.5).clamp(0 , 1 )
snake_case_ : Optional[Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ : Optional[int] = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
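# A minimal usage sketch (upstream this pipeline is published as
# KarrasVePipeline; the checkpoint id below is an assumption about a
# compatible NCSN++ UNet):
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]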
| 720 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = ZeroShotClassificationPipeline(
model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["polics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# No kwarg
snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : str = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# https://github.com/huggingface/transformers/issues/13846
snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(1 )
] , )
snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(2 )
] , )
with self.assertRaises(lowerCAmelCase__ ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier(lowerCAmelCase__ , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , )
self.run_entailment_id(lowerCAmelCase__ )
def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = zero_shot_classifier.model.config
snake_case_ : Optional[int] = config.labelaid
snake_case_ : Tuple = zero_shot_classifier.entailment_id
snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
snake_case_ : List[str] = original_labelaid
self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id )
@require_torch
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
snake_case_ : int = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
snake_case_ : Optional[int] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
snake_case_ : str = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Optional[int] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
snake_case_ : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Tuple = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
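# A minimal end-user sketch of the pipeline under test (the model id is the
# standard public zero-shot checkpoint, an assumption for illustration):
def _zero_shot_demo():
    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    return classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])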
| 656 | 0 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Union[str, Any] = args.pruning_method
snake_case_ : Union[str, Any] = args.threshold
snake_case_ : str = args.model_name_or_path.rstrip("/" )
snake_case_ : int = args.target_model_path
print(F'''Load fine-pruned model from {model_name_or_path}''' )
snake_case_ : Optional[int] = torch.load(os.path.join(__magic_name__ ,"pytorch_model.bin" ) )
snake_case_ : Tuple = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
snake_case_ : List[Any] = tensor
print(F'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
snake_case_ : Optional[Any] = tensor
print(F'''Copied layer {name}''' )
elif "bias" in name:
snake_case_ : Union[str, Any] = tensor
print(F'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
snake_case_ : int = MagnitudeBinarizer.apply(inputs=__magic_name__ ,threshold=__magic_name__ )
snake_case_ : int = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
snake_case_ : Any = name[:-6]
snake_case_ : Any = model[F'''{prefix_}mask_scores''']
snake_case_ : Union[str, Any] = TopKBinarizer.apply(__magic_name__ ,__magic_name__ )
snake_case_ : List[Any] = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
snake_case_ : Optional[Any] = name[:-6]
snake_case_ : Any = model[F'''{prefix_}mask_scores''']
snake_case_ : Tuple = ThresholdBinarizer.apply(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : List[str] = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
snake_case_ : List[str] = name[:-6]
snake_case_ : List[Any] = model[F'''{prefix_}mask_scores''']
snake_case_ : Dict = -0.1, 1.1
snake_case_ : Optional[Any] = torch.sigmoid(__magic_name__ )
snake_case_ : Union[str, Any] = s * (r - l) + l
snake_case_ : Optional[int] = s_bar.clamp(min=0.0 ,max=1.0 )
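                # (hard-concrete / L0 trick: sigmoid scores are stretched from
                # (0, 1) to (l, r) = (-0.1, 1.1) and clamped back to [0, 1], so
                # a fraction of the gates saturates at exactly 0 or 1)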
snake_case_ : Dict = tensor * mask
print(F'''Pruned layer {name}''' )
else:
raise ValueError("Unknown pruning method" )
if target_model_path is None:
snake_case_ : int = os.path.join(
os.path.dirname(__magic_name__ ) ,F'''bertarized_{os.path.basename(__magic_name__ )}''' )
if not os.path.isdir(__magic_name__ ):
shutil.copytree(__magic_name__ ,__magic_name__ )
print(F'''\nCreated folder {target_model_path}''' )
torch.save(__magic_name__ ,os.path.join(__magic_name__ ,"pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
            '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. '''
            '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. '''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
__lowerCamelCase : Dict = parser.parse_args()
main(args)
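    # Example invocation (a sketch; the script filename and paths are
    # placeholders):
    #   python bertarize.py --pruning_method topK --threshold 0.10 \
    #       --model_name_or_path ./serialization_dir/fine_pruned_model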
| 721 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip'''
def convert_xlm_roberta_xl_checkpoint_to_pytorch( roberta_checkpoint_path ,pytorch_dump_folder_path ,classification_head )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
roberta.eval() # disable dropout
snake_case_ : Dict = roberta.model.encoder.sentence_encoder
snake_case_ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" ,__magic_name__ )
snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
snake_case_ : int = roberta_sent_encoder.embed_positions.weight
snake_case_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
snake_case_ : str = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case_ : BertLayer = model.roberta.encoder.layer[i]
snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
snake_case_ : RobertaAttention = layer.attention
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight
snake_case_ : Any = roberta_layer.self_attn.q_proj.bias
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight
snake_case_ : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight
snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case_ : int = roberta_layer.final_layer_norm.weight
snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias
# intermediate
snake_case_ : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        snake_case_ : List[str] = roberta_layer.fc1.weight
        snake_case_ : List[Any] = roberta_layer.fc1.bias
# output
snake_case_ : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        snake_case_ : Any = roberta_layer.fc2.weight
        snake_case_ : Any = roberta_layer.fc2.bias
# end of layer
if classification_head:
snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight
snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
snake_case_ : int = roberta.model.encoder.lm_head.dense.bias
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias
snake_case_ : int = roberta.model.encoder.lm_head.weight
snake_case_ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case_ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 ) # batch of size 1
snake_case_ : Union[str, Any] = model(__magic_name__ )[0]
if classification_head:
snake_case_ : Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(__magic_name__ ) )
else:
snake_case_ : List[str] = roberta.model(__magic_name__ )[0]
print(our_output.shape ,their_output.shape )
snake_case_ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
snake_case_ : Any = torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 )
print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 656 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Any = logging.get_logger(__name__)
def rename_keys( state_dict )-> Tuple:
"""simple docstring"""
snake_case_ : Union[str, Any] = OrderedDict()
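    # Translate every parameter name from the original GLPN checkpoint layout
    # (module.encoder / module.decoder) to the Hugging Face naming scheme.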
for key, value in state_dict.items():
if key.startswith("module.encoder" ):
snake_case_ : List[str] = key.replace("module.encoder" ,"glpn.encoder" )
if key.startswith("module.decoder" ):
snake_case_ : int = key.replace("module.decoder" ,"decoder.stages" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
snake_case_ : Union[str, Any] = key[key.find("patch_embed" ) + len("patch_embed" )]
snake_case_ : Dict = key.replace(F'''patch_embed{idx}''' ,F'''patch_embeddings.{int(__magic_name__ )-1}''' )
if "norm" in key:
snake_case_ : Dict = key.replace("norm" ,"layer_norm" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
snake_case_ : int = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
snake_case_ : Tuple = key.replace(F'''layer_norm{idx}''' ,F'''layer_norm.{int(__magic_name__ )-1}''' )
if "layer_norm1" in key:
snake_case_ : Any = key.replace("layer_norm1" ,"layer_norm_1" )
if "layer_norm2" in key:
snake_case_ : Any = key.replace("layer_norm2" ,"layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
snake_case_ : str = key[key.find("block" ) + len("block" )]
snake_case_ : Tuple = key.replace(F'''block{idx}''' ,F'''block.{int(__magic_name__ )-1}''' )
if "attn.q" in key:
snake_case_ : Any = key.replace("attn.q" ,"attention.self.query" )
if "attn.proj" in key:
snake_case_ : Tuple = key.replace("attn.proj" ,"attention.output.dense" )
if "attn" in key:
snake_case_ : Optional[int] = key.replace("attn" ,"attention.self" )
if "fc1" in key:
snake_case_ : Dict = key.replace("fc1" ,"dense1" )
if "fc2" in key:
snake_case_ : Any = key.replace("fc2" ,"dense2" )
if "linear_pred" in key:
snake_case_ : List[str] = key.replace("linear_pred" ,"classifier" )
if "linear_fuse" in key:
snake_case_ : Dict = key.replace("linear_fuse.conv" ,"linear_fuse" )
snake_case_ : Any = key.replace("linear_fuse.bn" ,"batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
snake_case_ : Optional[Any] = key[key.find("linear_c" ) + len("linear_c" )]
snake_case_ : Union[str, Any] = key.replace(F'''linear_c{idx}''' ,F'''linear_c.{int(__magic_name__ )-1}''' )
if "bot_conv" in key:
snake_case_ : Union[str, Any] = key.replace("bot_conv" ,"0.convolution" )
if "skip_conv1" in key:
snake_case_ : Any = key.replace("skip_conv1" ,"1.convolution" )
if "skip_conv2" in key:
snake_case_ : List[str] = key.replace("skip_conv2" ,"2.convolution" )
if "fusion1" in key:
snake_case_ : Union[str, Any] = key.replace("fusion1" ,"1.fusion" )
if "fusion2" in key:
snake_case_ : Optional[Any] = key.replace("fusion2" ,"2.fusion" )
if "fusion3" in key:
snake_case_ : int = key.replace("fusion3" ,"3.fusion" )
if "fusion" in key and "conv" in key:
snake_case_ : List[Any] = key.replace("conv" ,"convolutional_layer" )
if key.startswith("module.last_layer_depth" ):
snake_case_ : Optional[int] = key.replace("module.last_layer_depth" ,"head.head" )
snake_case_ : List[str] = value
return new_state_dict
def read_in_k_v( state_dict ,config )-> Tuple:
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[config.hidden_sizes[i] :]
def prepare_img( )-> List[str]:
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url ,stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path ,pytorch_dump_folder_path ,push_to_hub=False ,model_name=None )-> List[str]:
"""simple docstring"""
snake_case_ : str = GLPNConfig(hidden_sizes=[64, 128, 320, 512] ,decoder_hidden_size=64 ,depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
snake_case_ : Optional[int] = GLPNImageProcessor()
# prepare image
snake_case_ : Any = prepare_img()
snake_case_ : Any = image_processor(images=__magic_name__ ,return_tensors="pt" ).pixel_values
logger.info("Converting model..." )
# load original state dict
snake_case_ : Optional[int] = torch.load(__magic_name__ ,map_location=torch.device("cpu" ) )
# rename keys
snake_case_ : List[str] = rename_keys(__magic_name__ )
# key and value matrices need special treatment
read_in_k_v(__magic_name__ ,__magic_name__ )
# create HuggingFace model and load state dict
snake_case_ : str = GLPNForDepthEstimation(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
# forward pass
snake_case_ : Union[str, Any] = model(__magic_name__ )
snake_case_ : int = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
snake_case_ : Dict = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
snake_case_ : List[Any] = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
snake_case_ : Tuple = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] ,__magic_name__ ,atol=1E-4 )
print("Looks ok!" )
# finally, push to hub if required
if push_to_hub:
logger.info("Pushing model and image processor to the hub..." )
model.push_to_hub(
repo_path_or_name=Path(__magic_name__ ,__magic_name__ ) ,organization="nielsr" ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
image_processor.push_to_hub(
repo_path_or_name=Path(__magic_name__ ,__magic_name__ ) ,organization="nielsr" ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
__lowerCamelCase : str = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 700 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher( function ,args=() ,num_processes=None ,mixed_precision="no" ,use_port="29500" )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = False
snake_case_ : int = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
snake_case_ : Any = True
elif "IPython" in sys.modules:
snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
snake_case_ : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__magic_name__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
with patch_environment(
                world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__magic_name__ )
def debug_launcher( function ,args=() ,num_processes=2 )-> Dict:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ )
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
| 656 | 0 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
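# Exact GELU: x * Phi(x), with the Gaussian CDF written via tf.math.erf.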
def _gelu( x )-> str:
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) ,x.dtype ) ))
    return x * cdf
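# tanh approximation of the Gaussian CDF used by "gelu_new":
#   gelu_new(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))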
def _gelu_new( x )-> int:
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    pi = tf.cast(math.pi ,x.dtype )
    coeff = tf.cast(0.044_715 ,x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x ,3 )) ))
    return x * cdf
def mish( x )-> List[str]:
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast( x )-> Dict:
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeff_1 = tf.cast(0.044_715 ,x.dtype )
    coeff_2 = tf.cast(0.7_978_845_608 ,x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_2 * (1.0 + coeff_1 * x * x) ))
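# "quick" GELU: a cheap sigmoid-based approximation, x * sigmoid(1.702 * x).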
def quick_gelu( x )-> str:
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeff = tf.cast(1.702 ,x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_10( x )-> Union[str, Any]:
    """simple docstring"""
    return tf.clip_by_value(_gelu(x ) ,-10 ,10 )
def glu( x ,axis=-1 )-> List[str]:
    """simple docstring"""
    a, b = tf.split(x ,2 ,axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
    def approximate_gelu_wrap( x )-> Tuple:
        """simple docstring"""
        return tf.keras.activations.gelu(x ,approximate=True )
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACTaFN = {
'''gelu''': gelu,
    '''gelu_10''': gelu_10,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def get_tf_activation( activation_string )-> List[str]:
"""simple docstring"""
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
| 701 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ : Optional[int] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
snake_case_ : Dict = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str:
'''simple docstring'''
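        # Iterative DFS with an explicit stack: starting from `s` (or an arbitrary
        # vertex by default), returns the visited nodes and stops early as soon as
        # the optional target `d` is reached.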
if s == d:
return []
snake_case_ : str = []
snake_case_ : Optional[int] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Dict = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int:
'''simple docstring'''
if c == -1:
snake_case_ : Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
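        # DFS-based topological sort: a vertex is appended to `sorted_nodes` only
        # once all of its children have been visited (DFS finishing order).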
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
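        # Collects the vertices that lie on a cycle: whenever the DFS meets a
        # back-edge, the portion of the stack that closes the cycle is recorded.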
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
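        # Same traversal as above, but short-circuits: returns True as soon as a
        # back-edge (a cycle) is detected, and False if the DFS completes cleanly.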
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[Any] = time()
return end - begin
def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Any = time()
return end - begin
class A_ :
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case_ : str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
snake_case_ : List[str] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
snake_case_ : List[str] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
snake_case_ : List[str] = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Tuple = time()
return end - begin
| 656 | 0 |
'''simple docstring'''
__lowerCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609_344,
"knot": 1.852,
}
__lowerCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277_777_778,
"mph": 0.621_371_192,
"knot": 0.539_956_803,
}
def convert_speed( speed ,unit_from ,unit_to )-> float:
    """simple docstring"""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            F'''Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n'''
            F'''Valid values are: {', '.join(speed_chart_inverse )}'''
        )
        raise ValueError(msg )
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] ,3 )
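# Example: convert_speed(100, "km/h", "m/s") == round(100 * 1.0 * 0.277_777_778, 3) == 27.778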
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'''\s+''')
def get_hash( example )-> Union[str, Any]:
    """simple docstring"""
    return {"hash": hashlib.md5(re.sub(PATTERN ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()}
def line_stats( example )-> str:
    """simple docstring"""
    line_lengths = [len(line ) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats( example )-> int:
    """simple docstring"""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def check_uniques( example ,uniques )-> Tuple:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def is_autogenerated( example ,scan_width=5 )-> Tuple:
    """simple docstring"""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width ) ,lines ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test( example ,scan_width=5 ,coeff=0.05 )-> Optional[Any]:
    """simple docstring"""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) ,lines ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
    nlines = example["content"].count("\n" )
    threshold = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords( example )-> str:
    """simple docstring"""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments( example ,minimum=4 )-> Optional[int]:
    """simple docstring"""
    lines = example["content"].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio( example )-> List[Any]:
    """simple docstring"""
    input_ids = tokenizer(example["content"] ,truncation=False )["input_ids"]
    ratio = len(example["content"] ) / len(input_ids )
return {"ratio": ratio}
def preprocess( example )-> Optional[Any]:
    """simple docstring"""
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
return results
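# Combined quality filter: an example is kept only if its hash is unique and it
# passes the line-length, alphanumeric-fraction, token-ratio and keyword
# heuristics computed in `preprocess`.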
def filter( example ,uniques ,args )-> Tuple:
    """simple docstring"""
    if not check_uniques(example ,uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file( file_path )-> Dict:
    """simple docstring"""
    with open(file_path ,"rb" ) as f_in:
        with gzip.open(str(file_path ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in ,f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 656 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Any = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = ['''GLPNFeatureExtractor''']
__lowerCamelCase : Any = ['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 703 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = torch.nn.Linear(10 , 10 )
snake_case_ : Dict = torch.optim.SGD(model.parameters() , 0.1 )
snake_case_ : Tuple = Accelerator()
snake_case_ : Optional[Any] = accelerator.prepare(lowerCAmelCase__ )
try:
pickle.loads(pickle.dumps(lowerCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 656 | 0 |
def nor_gate( input_1 ,input_2 )-> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0 )
def main( )-> None:
"""simple docstring"""
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F'''| 0 | 0 | {nor_gate(0 ,0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 ,1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 ,0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 ,1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 704 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : Any = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__lowerCamelCase : Any = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = None
# source code of `config_class`
snake_case_ : List[Any] = inspect.getsource(__magic_name__ )
snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
snake_case_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ : str = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case_ : Dict = ckpt_name
break
return checkpoint
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ )
snake_case_ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__magic_name__ )
if len(__magic_name__ ) > 0:
snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 656 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = IFInpaintingSuperResolutionPipeline
a__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
a__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
a__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
return self._get_superresolution_dummy_components()
def _A ( self :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> List[Any]:
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith("mps" ):
snake_case_ : List[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
snake_case_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
snake_case_ : List[str] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
snake_case_ : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
snake_case_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
snake_case_ : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _A ( self :List[Any] ) -> Dict:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
self._test_save_load_local()
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 705 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''cvt'''
def __init__( self :List[Any] , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Any=[7, 3, 3] , lowerCAmelCase__ :Dict=[4, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[2, 1, 1] , lowerCAmelCase__ :Any=[64, 192, 384] , lowerCAmelCase__ :List[str]=[1, 3, 6] , lowerCAmelCase__ :str=[1, 2, 10] , lowerCAmelCase__ :Any=[4.0, 4.0, 4.0] , lowerCAmelCase__ :int=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Optional[Any]=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Dict=[0.0, 0.0, 0.1] , lowerCAmelCase__ :List[Any]=[True, True, True] , lowerCAmelCase__ :List[Any]=[False, False, True] , lowerCAmelCase__ :Dict=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase__ :Any=[3, 3, 3] , lowerCAmelCase__ :Tuple=[1, 1, 1] , lowerCAmelCase__ :Optional[int]=[2, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[1, 1, 1] , lowerCAmelCase__ :Any=[1, 1, 1] , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Dict=1E-1_2 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : int = num_channels
snake_case_ : int = patch_sizes
snake_case_ : Optional[Any] = patch_stride
snake_case_ : Dict = patch_padding
snake_case_ : Tuple = embed_dim
snake_case_ : Optional[int] = num_heads
snake_case_ : Union[str, Any] = depth
snake_case_ : Optional[int] = mlp_ratio
snake_case_ : Tuple = attention_drop_rate
snake_case_ : str = drop_rate
snake_case_ : Tuple = drop_path_rate
snake_case_ : Any = qkv_bias
snake_case_ : Union[str, Any] = cls_token
snake_case_ : int = qkv_projection_method
snake_case_ : Any = kernel_qkv
snake_case_ : Union[str, Any] = padding_kv
snake_case_ : str = stride_kv
snake_case_ : Dict = padding_q
snake_case_ : Tuple = stride_q
snake_case_ : Any = initializer_range
snake_case_ : Any = layer_norm_eps
| 656 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class A_ :
"""simple docstring"""
    def __init__( self :Union[str, Any] , row :int , column :int , default_value :float = 0 ) -> None:
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
def __str__( self :Optional[Any] ) -> str:
'''simple docstring'''
        s = F'''Matrix consists of {self.row} rows and {self.column} columns\n'''
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = F'''%{max_element_length}s'''
        # Make string and return
        def single_line(row_vector :list[float] ) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
s += "\n".join(single_line(lowerCAmelCase__ ) for row_vector in self.array )
return s
def __repr__( self :Tuple ) -> str:
'''simple docstring'''
return str(self )
    def validate_indices( self :List[Any] , loc :tuple[int, int] ) -> bool:
        '''simple docstring'''
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
    def __getitem__( self :Optional[Any] , loc :tuple[int, int] ) -> Any:
        '''simple docstring'''
        assert self.validate_indices(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self :Tuple , loc :tuple[int, int] , value :float ) -> None:
        '''simple docstring'''
        assert self.validate_indices(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self :Dict , another :Matrix ) -> Matrix:
        '''simple docstring'''
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__( self :int ) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self :Dict , another :Matrix ) -> Matrix:
        '''simple docstring'''
        return self + (-another)
    def __mul__( self :Optional[int] , another :int | float | Matrix ) -> Matrix:
        '''simple docstring'''
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = F'''Unsupported type given for another ({type(another )})'''
            raise TypeError(msg )
    def transpose( self :Optional[int] ) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
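    # Sherman-Morrison formula, with `self` holding A^(-1):
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    # Returns None when the denominator 1 + v^T A^(-1) u is zero.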
    def sherman_morrison( self :Dict , u :Matrix , v :Matrix ) -> Any:
        '''simple docstring'''
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def testa( )-> None:
"""simple docstring"""
# a^(-1): here the identity matrix, so the update below follows directly
ainv = Matrix(3 ,3 ,0 )
for i in range(3 ):
ainv[i, i] = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
u = Matrix(3 ,1 ,0 )
u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
v = Matrix(3 ,1 ,0 )
v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u ,v )}''' )
def run_doctests( )-> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
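# --- Added illustration (not part of the original file) ---
# A minimal NumPy cross-check of the Sherman-Morrison identity implemented above:
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u).
# Assumes numpy is available; illustrative sketch only, not the module's API.
def _sherman_morrison_numpy_check() -> bool:
    import numpy as np

    rng = np.random.default_rng(0)
    a = np.eye(3) + 0.1 * rng.standard_normal((3, 3))  # well-conditioned test matrix
    u = rng.standard_normal((3, 1))
    v = rng.standard_normal((3, 1))
    a_inv = np.linalg.inv(a)
    denom = 1.0 + float(v.T @ a_inv @ u)
    rank_one_update = a_inv - (a_inv @ u @ v.T @ a_inv) / denom  # rank-1 update of the inverse
    return np.allclose(rank_one_update, np.linalg.inv(a + u @ v.T))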
| 706 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, removes punctuation from predictions and references before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _A ( self , predictions , references , normalized=False , ignore_punct=False , support_zh_ja_chars=False , case_sensitive=False , ):
'''simple docstring'''
references_per_prediction = len(references[0] )
if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
sb_ter = TER(
normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
output = sb_ter.corpus_score(predictions , transformed_references )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 656 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Any = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class A_ (PretrainedConfig ):
"""simple docstring"""
model_type = '''swin2sr'''
attribute_map = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__(self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.0_2 , layer_norm_eps=1E-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ):
'''simple docstring'''
super().__init__(**kwargs )
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths )
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.upscale = upscale
self.img_range = img_range
self.resi_connection = resi_connection
self.upsampler = upsampler
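# --- Added illustration (not part of the original config) ---
# What the attribute_map above buys: reads of the generic names are redirected
# to the Swin2SR-specific attributes, e.g. config.hidden_size resolves to
# config.embed_dim. Minimal standalone sketch of the mechanism; the real
# PretrainedConfig implements this inside its attribute lookup machinery.
class _AliasDemo:
    attribute_map = {"hidden_size": "embed_dim"}

    def __init__(self, embed_dim: int) -> None:
        self.embed_dim = embed_dim

    def __getattr__(self, name: str):
        # only called when normal lookup fails, i.e. for aliased names
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

assert _AliasDemo(embed_dim=180).hidden_size == 180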
| 707 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset( )-> Dataset:
"""simple docstring"""
data_dict = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
dataset = Dataset.from_dict(data_dict )
return dataset
class A_ (TestCase ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
ds = get_dataset()
duplicate_clusters = make_duplicate_clusters(ds , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
ds = get_dataset()
ds_filter, duplicate_clusters = deduplicate_dataset(ds )
self.assertEqual(len(ds_filter ) , 2 )
print(duplicate_clusters )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , False )
| 656 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A_ (metaclass=DummyObject ):
"""simple docstring"""
a__ = ['''transformers''', '''torch''', '''note_seq''']
def __init__(self , *args , **kwargs ):
'''simple docstring'''
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def _A (cls , *args , **kwargs ):
'''simple docstring'''
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def _A (cls , *args , **kwargs ):
'''simple docstring'''
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 708 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
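# --- Added illustration (not part of the original file) ---
# The core idea behind _LazyModule: a module-level __getattr__ (PEP 562)
# defers the heavy submodule import until an attribute is first accessed.
# Minimal sketch with illustrative names; the real class also wires up dir(),
# module specs, and error reporting for missing optional dependencies.
import importlib

_LAZY_ATTRS = {"GPTNeoXJapaneseTokenizer": ".tokenization_gpt_neox_japanese"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")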
| 656 | 0 |
'''simple docstring'''
class EditDistance:
"""simple docstring"""
def __init__(self ) -> None:
'''simple docstring'''
self.worda = ""
self.wordb = ""
self.dp = []
def __min_dist_top_down_dp(self , m: int , n: int ) -> int:
'''simple docstring'''
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.wordb[n]:
self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
insert = self.__min_dist_top_down_dp(m , n - 1 )
delete = self.__min_dist_top_down_dp(m - 1 , n )
replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
self.dp[m][n] = 1 + min(insert , delete , replace )
return self.dp[m][n]
def min_dist_top_down(self , worda: str , wordb: str ) -> int:
'''simple docstring'''
self.worda = worda
self.wordb = wordb
self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
return self.__min_dist_top_down_dp(len(worda ) - 1 , len(wordb ) - 1 )
def min_dist_bottom_up(self , worda: str , wordb: str ) -> int:
'''simple docstring'''
self.worda = worda
self.wordb = wordb
m = len(worda )
n = len(wordb )
self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
self.dp[i][j] = j
elif j == 0: # second string is empty
self.dp[i][j] = i
elif worda[i - 1] == wordb[j - 1]: # last characters are equal
self.dp[i][j] = self.dp[i - 1][j - 1]
else:
insert = self.dp[i][j - 1]
delete = self.dp[i - 1][j]
replace = self.dp[i - 1][j - 1]
self.dp[i][j] = 1 + min(insert , delete , replace )
return self.dp[m][n]
if __name__ == "__main__":
solver = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
Sa = input('''Enter the first string: ''').strip()
Sb = input('''Enter the second string: ''').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}''')
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
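# --- Added illustration (not part of the original file) ---
# Worked example for the DP above: "kitten" -> "sitting" needs 3 edits
# (substitute k->s, substitute e->i, append g); both formulations agree.
_demo_solver = EditDistance()
assert _demo_solver.min_dist_top_down("kitten", "sitting") == 3
assert _demo_solver.min_dist_bottom_up("kitten", "sitting") == 3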
| 709 |
'''simple docstring'''
def hexagonal_numbers( length: int )-> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(length ,int ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
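# --- Added illustration (not part of the original file) ---
# Sanity checks for the closed form above: the n-th hexagonal number
# n * (2n - 1) is also the (2n - 1)-th triangular number (2n - 1) * 2n / 2.
assert hexagonal_numbers(length=6) == [0, 1, 6, 15, 28, 45]
assert all(n * (2 * n - 1) == (2 * n - 1) * (2 * n) // 2 for n in range(1, 100))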
| 656 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 710 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
"""simple docstring"""
if subparsers is not None:
parser = subparsers.add_parser("test" )
else:
parser = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" ,default=__magic_name__ ,help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) ,)
if subparsers is not None:
parser.set_defaults(func=test_command )
return parser
def test_command( args ):
"""simple docstring"""
script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
test_args = script_name
else:
test_args = F'''--config_file={args.config_file} {script_name}'''
cmd = ["accelerate-launch"] + test_args.split()
result = execute_subprocess_async(cmd ,env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def main( ):
"""simple docstring"""
parser = test_command_parser()
args = parser.parse_args()
test_command(args )
if __name__ == "__main__":
main()
| 656 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
"""simple docstring"""
config_cls = PegasusConfig
config_updates = {}
hidden_act = '''gelu'''
def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs_for_common(self ):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
def check_decoder_model_past_large_inputs(self , config , inputs_dict ):
'''simple docstring'''
model = TFPegasusModel(config=config ).get_decoder()
input_ids = inputs_dict["input_ids"]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["attention_mask"][:1, :]
head_mask = inputs_dict["head_mask"]
self.batch_size = 1
# first forward pass
outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
output, past_key_values = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and attention_mask
next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
def prepare_pegasus_inputs_dict( config ,input_ids ,decoder_input_ids ,attention_mask=None ,decoder_attention_mask=None ,head_mask=None ,decoder_head_mask=None ,cross_attn_head_mask=None ,):
"""simple docstring"""
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids ,config.pad_token_id ) ,tf.int8 )
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.int8 ),
] ,axis=-1 ,)
if head_mask is None:
head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A_ (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a__ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
a__ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
a__ = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
a__ = True
a__ = False
a__ = False
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.model_tester = TFPegasusModelTester(self )
self.config_tester = ConfigTester(self , config_class=PegasusConfig )
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :str ) -> Any:
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class A_ (unittest.TestCase ):
"""simple docstring"""
src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
model_name = '''google/pegasus-xsum'''
@cached_property
def tokenizer(self ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def model(self ):
'''simple docstring'''
model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
def _assert_generated_batch_equal_expected(self , **tokenizer_kwargs ):
'''simple docstring'''
generated_words = self.translate_src_text(**tokenizer_kwargs )
assert self.expected_text == generated_words
def translate_src_text(self , **tokenizer_kwargs ):
'''simple docstring'''
model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , padding=True , return_tensors="tf" )
generated_ids = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
return generated_words
@slow
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
self._assert_generated_batch_equal_expected()
| 711 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def _A (self , predictions , references , return_pvalue=False ):
'''simple docstring'''
results = spearmanr(predictions , references )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 656 | 0 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__lowerCamelCase : str = 3
def primitive_root( p_val: int )-> int:
"""simple docstring"""
print("Generating primitive root of p" )
while True:
g = random.randrange(3 ,p_val )
if pow(g ,2 ,p_val ) == 1:
continue
if pow(g ,p_val ,p_val ) == 1:
continue
return g
def generate_key( key_size: int )-> tuple[tuple[int, int, int, int], tuple[int, int]]:
"""simple docstring"""
print("Generating prime p..." )
p = rabin_miller.generate_large_prime(key_size ) # select large prime number.
e_1 = primitive_root(p ) # one primitive root on modulo p.
d = random.randrange(3 ,p ) # private_key -> has to be greater than 2 for safety.
e_2 = cryptomath.find_mod_inverse(pow(e_1 ,d ,p ) ,p )
public_key = (key_size, e_1, e_2, p)
private_key = (key_size, d)
return public_key, private_key
def make_key_files( name: str ,key_size: int )-> None:
"""simple docstring"""
if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
print("\nWARNING:" )
print(
F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
"Use a different name or delete these files and re-run this program." )
sys.exit()
public_key, private_key = generate_key(key_size )
print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(F'''{name}_pubkey.txt''' ,"w" ) as fo:
fo.write(F'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
print(F'''Writing private key to file {name}_privkey.txt...''' )
with open(F'''{name}_privkey.txt''' ,"w" ) as fo:
fo.write(F'''{private_key[0]},{private_key[1]}''' )
def main( )-> None:
"""simple docstring"""
print("Making key files..." )
make_key_files("elgamal" ,2048 )
print("Key files generation successful" )
if __name__ == "__main__":
main()
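# --- Added illustration (not part of the original file) ---
# Textbook ElGamal round trip with toy numbers; NOT the exact key layout the
# functions above produce, far too small to be secure, and pow(s, -1, p)
# needs Python 3.8+. Purely an illustrative sketch of the scheme.
def _elgamal_toy_roundtrip() -> bool:
    p, g = 23, 5            # public prime and generator
    x = 6                   # private key
    h = pow(g, x, p)        # public key component h = g^x mod p
    m, k = 20, 3            # message and ephemeral key
    c1, c2 = pow(g, k, p), (m * pow(h, k, p)) % p
    s = pow(c1, x, p)       # shared secret recovered by the receiver
    return (c2 * pow(s, -1, p)) % p == m  # decrypts back to m

assert _elgamal_toy_roundtrip()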
| 712 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class A_ (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
a__ = M2M100Tokenizer
a__ = False
a__ = False
a__ = True
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Optional[int] = Path(self.tmpdirname )
save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str:
'''simple docstring'''
return M2M100Tokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = "</s>"
snake_case_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
tokenizer = self.get_tokenizer()
vocab_keys = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
tokenizer = self.get_tokenizer()
tokens = tokenizer.tokenize("This is a test" )
self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens ) , [2, 3, 4, 5, 6] , )
back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(back_tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
text = tokenizer.convert_tokens_to_string(back_tokens )
self.assertEqual(text , "This is a test" )
@slow
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
checkpoint_name = '''facebook/m2m100_418M'''
src_text = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
tgt_text = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def _A ( cls :str ) -> int:
'''simple docstring'''
cls.tokenizer = M2M100Tokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
cls.pad_token_id = 1
return cls
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
vocab = self.tokenizer.get_vocab()
self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , vocab )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = "en"
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
# fmt: off
generated_ids = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
self.assertEqual(result , expected_french )
self.assertNotIn(self.tokenizer.eos_token , result )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
tmpdirname = tempfile.mkdtemp()
original_lang_token_to_id = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(tmpdirname )
new_tok = M2M100Tokenizer.from_pretrained(tmpdirname )
self.assertDictEqual(new_tok.lang_token_to_id , original_lang_token_to_id )
@require_torch
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = "en"
snake_case_ : Tuple = "fr"
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Dict = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case_ : str = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case_ : int = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _A ( self :str ) -> int:
'''simple docstring'''
snake_case_ : Dict = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case_ : Tuple = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
inputs = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
| 656 | 0 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__lowerCamelCase : Optional[Any] = logging.get_logger('''transformers.models.speecht5''')
def load_weights( checkpoint ,hf_model ,config ):
"""simple docstring"""
hf_model.apply_weight_norm()
hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
hf_model.upsampler[i].weight_g.data = checkpoint[F'''upsamples.{i}.1.weight_g''']
hf_model.upsampler[i].weight_v.data = checkpoint[F'''upsamples.{i}.1.weight_v''']
hf_model.upsampler[i].bias.data = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path ,stats_path ,pytorch_dump_folder_path ,config_path=None ,repo_id=None ,):
"""simple docstring"""
if config_path is not None:
config = SpeechTaHifiGanConfig.from_pretrained(config_path )
else:
config = SpeechTaHifiGanConfig()
model = SpeechTaHifiGan(config )
orig_checkpoint = torch.load(checkpoint_path )
load_weights(orig_checkpoint["model"]["generator"] ,model ,config )
stats = np.load(stats_path )
mean = stats[0].reshape(-1 )
scale = stats[1].reshape(-1 )
model.mean = torch.from_numpy(mean ).float()
model.scale = torch.from_numpy(scale ).float()
model.save_pretrained(pytorch_dump_folder_path )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(repo_id )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
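# --- Added illustration (not part of the original script) ---
# Why the checkpoint stores weight_g / weight_v pairs: under weight
# normalization, weight = g * v / ||v||, so the two tensors fully determine
# the convolution weight. Minimal torch sketch; illustrative only.
def _weight_norm_sketch() -> None:
    import torch.nn as nn
    from torch.nn.utils import weight_norm

    conv = weight_norm(nn.Conv1d(4, 8, 3))  # registers weight_g and weight_v
    v = conv.weight_v
    g = conv.weight_g
    w = g * v / v.norm(dim=(1, 2), keepdim=True)  # reconstruct the weight
    assert torch.allclose(w, conv.weight, atol=1E-6)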
| 713 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy( saved_model_path ,strict ,opset ):
"""simple docstring"""
saved_model = SavedModel()
onnx_ops = []
with open(os.path.join(REPO_PATH ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f:
onnx_opsets = json.load(f )["opsets"]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(i )] )
with open(saved_model_path ,"rb" ) as f:
saved_model.ParseFromString(f.read() )
model_op_names = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
model_op_names = sorted(model_op_names )
incompatible_ops = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(op )
if strict and len(incompatible_ops ) > 0:
raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops ) )
elif len(incompatible_ops ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*incompatible_ops ,sep="\n" )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 656 | 0 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
model = torch.nn.Linear(10 , 10 )
optimizer = torch.optim.SGD(model.parameters() , 0.1 )
accelerator = Accelerator()
optimizer = accelerator.prepare(optimizer )
try:
pickle.loads(pickle.dumps(optimizer ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 714 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['''names''', '''prefix''']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['''encoding_errors''', '''on_bad_lines''']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['''date_format''']
@dataclass
class A_ (datasets.BuilderConfig ):
"""simple docstring"""
a__ = ","
a__ = None
a__ = "infer"
a__ = None
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = None
a__ = None
a__ = None
a__ = None
a__ = False
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = False
a__ = True
a__ = None
a__ = "."
a__ = None
a__ = '"'
a__ = 0
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = 0
a__ = True
a__ = False
a__ = None
a__ = 10000
a__ = None
a__ = "strict"
a__ = "error"
a__ = None
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
if self.delimiter is not None:
snake_case_ : Tuple = self.delimiter
if self.column_names is not None:
snake_case_ : List[Any] = self.column_names
@property
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
a__ = CsvConfig
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            snake_case_ : int = data_files
            if isinstance(files , str ):
                snake_case_ : List[str] = [files]
            snake_case_ : Tuple = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
snake_case_ : str = []
for split_name, files in data_files.items():
            if isinstance(files , str ):
                snake_case_ : str = [files]
            snake_case_ : Any = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
return splits
def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
snake_case_ : int = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
# cheaper cast
                snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
                snake_case_ : Dict = table_cast(pa_table , schema )
return pa_table
def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
snake_case_ : str = (
{
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
            snake_case_ : Tuple = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    snake_case_ : Optional[int] = pa.Table.from_pandas(df )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
except ValueError as e:
                logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
raise
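# Usage sketch: this builder backs datasets.load_dataset("csv", ...); "data.csv" is a placeholder path.
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files="data.csv", sep=",", quotechar='"')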
| 656 | 0 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class A_ :
"""simple docstring"""
def __init__( self :str , lowerCAmelCase__ :list[tuple[float, float]] ) -> str:
'''simple docstring'''
snake_case_ : Any = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
snake_case_ : Optional[Any] = len(lowerCAmelCase__ ) - 1
def _A ( self :int , lowerCAmelCase__ :float ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
snake_case_ : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
return output_values
def _A ( self :List[Any] , lowerCAmelCase__ :float ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
        snake_case_ : Optional[int] = self.basis_function(t )
snake_case_ : Any = 0.0
snake_case_ : str = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def _A ( self :Tuple , lowerCAmelCase__ :float = 0.0_1 ) -> Union[str, Any]:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
snake_case_ : list[float] = [] # x coordinates of points to plot
snake_case_ : list[float] = [] # y coordinates of points to plot
snake_case_ : Optional[int] = 0.0
while t <= 1:
            snake_case_ : Optional[int] = self.bezier_curve_function(t )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
snake_case_ : str = [i[0] for i in self.list_of_points]
snake_case_ : int = [i[1] for i in self.list_of_points]
plt.plot(
            to_plot_x , to_plot_y , color="blue" , label="Curve of Degree " + str(self.degree ) , )
        plt.scatter(x_coordinates , y_coordinates , color="red" , label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
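# Worked check: for the degree-1 curve through (1, 2) and (3, 5), the Bernstein basis at
# t = 0.5 is [0.5, 0.5], so evaluating the curve there yields the midpoint (2.0, 3.5).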
| 715 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MgpstrTokenizer
a__ = False
a__ = {}
a__ = False
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = "tester"
snake_case_ : Tuple = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _A ( self :Dict ) -> str:
'''simple docstring'''
pass
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _A ( self :int ) -> Dict:
'''simple docstring'''
pass
| 656 | 0 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__lowerCamelCase : Union[str, Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class A_ (nn.Module ):
"""simple docstring"""
def __init__( self :List[str] , lowerCAmelCase__ :List[Any] ) -> List[Any]:
'''simple docstring'''
super().__init__()
        snake_case_ : List[Any] = torchvision.models.resnet152(pretrained=lowerCAmelCase__ )
snake_case_ : Tuple = list(model.children() )[:-2]
snake_case_ : Union[str, Any] = nn.Sequential(*lowerCAmelCase__ )
        snake_case_ : str = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def _A ( self :List[Any] , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.pool(self.model(lowerCAmelCase__ ) )
snake_case_ : Union[str, Any] = torch.flatten(lowerCAmelCase__ , start_dim=2 )
snake_case_ : Union[str, Any] = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class A_ (a_ ):
"""simple docstring"""
def __init__( self :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str ) -> int:
'''simple docstring'''
snake_case_ : str = [json.loads(lowerCAmelCase__ ) for l in open(lowerCAmelCase__ )]
snake_case_ : Tuple = os.path.dirname(lowerCAmelCase__ )
snake_case_ : Any = tokenizer
snake_case_ : List[Any] = labels
snake_case_ : Union[str, Any] = len(lowerCAmelCase__ )
snake_case_ : str = max_seq_length
snake_case_ : Tuple = transforms
def __len__( self :Dict ) -> Any:
'''simple docstring'''
return len(self.data )
def __getitem__( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=lowerCAmelCase__ ) )
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
snake_case_ : Tuple = sentence[: self.max_seq_length]
snake_case_ : List[Any] = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]["label"]]] = 1
snake_case_ : str = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
        snake_case_ : int = self.transforms(image )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Any = Counter()
for row in self.data:
label_freqs.update(row["label"] )
return label_freqs
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Optional[int] = [len(row["sentence"] ) for row in batch]
    bsz, max_seq_len = len(batch ), max(lengths )
    snake_case_ : Any = torch.zeros(bsz ,max_seq_len ,dtype=torch.long )
    snake_case_ : List[Any] = torch.zeros(bsz ,max_seq_len ,dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch ,lengths ) ):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
snake_case_ : Dict = torch.stack([row["image"] for row in batch] )
snake_case_ : Dict = torch.stack([row["label"] for row in batch] )
snake_case_ : int = torch.stack([row["image_start_token"] for row in batch] )
snake_case_ : List[Any] = torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
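# Shape sketch (B = batch size, N = longest tokenized sentence in the batch):
# text (B, N) and mask (B, N) are long tensors, image is (B, 3, 224, 224) after the
# transforms defined below, start/end image tokens are (B,), and the target is a
# (B, n_classes) multi-hot tensor.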
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def __UpperCAmelCase ( )-> str:
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] ,std=[0.12_221_994, 0.12_145_835, 0.14_380_469] ,),
] )
| 716 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__magic_name__ ,__magic_name__ ) ) )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list[list[list[float] | float]]:
"""simple docstring"""
if dataset.ndim != value_array.ndim:
snake_case_ : int = (
"Wrong input data's dimensions... "
F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(__magic_name__ )
try:
if dataset.shape[1] != value_array.shape[1]:
snake_case_ : Dict = (
"Wrong input data's shape... "
F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(__magic_name__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
snake_case_ : Dict = (
"Input data have different datatype... "
F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(__magic_name__ )
snake_case_ : Optional[int] = []
for value in value_array:
        snake_case_ : List[str] = euclidean(value ,dataset[0] )
snake_case_ : int = dataset[0].tolist()
for dataset_value in dataset[1:]:
            snake_case_ : Optional[Any] = euclidean(value ,dataset_value )
if dist > temp_dist:
snake_case_ : Tuple = temp_dist
snake_case_ : Optional[int] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return np.dot(__magic_name__ ,__magic_name__ ) / (norm(__magic_name__ ) * norm(__magic_name__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
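# Usage sketch (assuming the nearest-neighbour helper above is named similarity_search, as in the original source):
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.0, 0.1]])
#   similarity_search(dataset, value_array)  # -> [[[0.0, 0.0], 0.1]]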
| 656 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Any = '''▁'''
__lowerCamelCase : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__lowerCamelCase : Optional[int] = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
__lowerCamelCase : Any = {
'''facebook/xglm-564M''': 2048,
}
class A_ (a_ ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ['''input_ids''', '''attention_mask''']
def __init__( self :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int="<s>" , lowerCAmelCase__ :Dict="</s>" , lowerCAmelCase__ :str="</s>" , lowerCAmelCase__ :Union[str, Any]="<s>" , lowerCAmelCase__ :Optional[int]="<unk>" , lowerCAmelCase__ :Union[str, Any]="<pad>" , lowerCAmelCase__ :Optional[Dict[str, Any]] = None , **lowerCAmelCase__ :List[Any] , ) -> None:
'''simple docstring'''
snake_case_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case_ : Dict = 7
snake_case_ : Dict = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
snake_case_ : Optional[Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
snake_case_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
snake_case_ : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ : Optional[int] = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case_ : int = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
snake_case_ : Optional[int] = len(self.sp_model )
snake_case_ : int = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowerCAmelCase__ )
snake_case_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = self.__dict__.copy()
snake_case_ : Tuple = None
snake_case_ : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Dict , lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case_ : Dict = {}
snake_case_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _A ( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case_ : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _A ( self :Optional[int] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ ))
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ ))
def _A ( self :Optional[int] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _A ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
        snake_case_ : Dict = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _A ( self :int , lowerCAmelCase__ :str ) -> List[str]:
'''simple docstring'''
        return self.sp_model.encode(lowerCAmelCase__ , out_type=str )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Tuple:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ : Dict = self.sp_model.PieceToId(lowerCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _A ( self :List[str] , lowerCAmelCase__ :Tuple ) -> List[str]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
        snake_case_ : Union[str, Any] = "".join(lowerCAmelCase__ ).replace(SPIECE_UNDERLINE , " " ).strip()
return out_string
def _A ( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ : int = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                snake_case_ : Any = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
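# Usage sketch (assumes Hub access; XGLMTokenizer is the released name of this tokenizer class):
#   tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   tok.convert_ids_to_tokens(tok("Hello world").input_ids)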
| 717 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=None ,**__magic_name__ )-> Optional[Any]:
"""simple docstring"""
    snake_case_ : int = [x.strip() for x in open(pred_path ).readlines()]
    snake_case_ : Optional[int] = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    snake_case_ : List[Any] = calculate_rouge(pred_lns ,tgt_lns ,**kwargs )
    if save_path is not None:
        save_json(metrics ,save_path ,indent=None )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
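# Invocation sketch via python-fire (file names are placeholders):
#   python rouge_cli.py predictions.txt references.txt --save_path rouge_metrics.json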
| 656 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : int = [1]
    for i in range(2 ,n ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
snake_case_ : int = []
    snake_case_ : Any = list(range(n ) )
# Find permutation
while factorials:
snake_case_ : Tuple = factorials.pop()
        number, k = divmod(k ,factorial )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
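# Worked example (assuming the original signature kth_permutation(k, n)): kth_permutation(3, 3)
# walks factorials [2, 1]: divmod(3, 2) picks element 1, divmod(1, 1) picks 2, and the
# remainder is 0, giving [1, 2, 0], the k=3 (0-indexed) lexicographic permutation of [0, 1, 2].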
| 718 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[Any] = state_dict.pop(__magic_name__ )
snake_case_ : Any = val
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case_ : Optional[Any] = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" )
snake_case_ : int = value
else:
snake_case_ : int = value
return new_state_dict
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = ""
if is_panoptic:
snake_case_ : Dict = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case_ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
snake_case_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Tuple = in_proj_weight[:256, :]
snake_case_ : List[Any] = in_proj_bias[:256]
snake_case_ : Optional[Any] = in_proj_weight[256:512, :]
snake_case_ : Optional[int] = in_proj_bias[256:512]
snake_case_ : Optional[int] = in_proj_weight[-256:, :]
snake_case_ : str = in_proj_bias[-256:]
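        # The fused in_proj weight is (768, 256): rows 0:256 feed the query projection,
        # rows 256:512 the key projection, and rows 512:768 (the -256: slice) the value
        # projection, since the transformer hidden size is 256.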
def __UpperCAmelCase ( )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ : Optional[Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case_ : Optional[Any] = "resnet101"
if "dc5" in model_name:
snake_case_ : List[str] = True
snake_case_ : Tuple = "panoptic" in model_name
if is_panoptic:
snake_case_ : List[Any] = 250
else:
snake_case_ : Optional[Any] = 91
snake_case_ : Optional[int] = "huggingface/label-files"
snake_case_ : Dict = "coco-detection-id2label.json"
    snake_case_ : List[Any] = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="dataset" ) ,"r" ) )
    snake_case_ : Optional[int] = {int(k ): v for k, v in idalabel.items()}
snake_case_ : int = idalabel
snake_case_ : Dict = {v: k for k, v in idalabel.items()}
# load image processor
snake_case_ : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection"
    snake_case_ : str = ConditionalDetrImageProcessor(format=format )
# prepare image
snake_case_ : str = prepare_img()
    snake_case_ : int = image_processor(images=image ,return_tensors="pt" )
snake_case_ : Union[str, Any] = encoding["pixel_values"]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
    snake_case_ : Union[str, Any] = torch.hub.load("DeppMeng/ConditionalDETR" ,model_name ,pretrained=True ).eval()
snake_case_ : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case_ : Any = "conditional_detr." + src
        rename_key(state_dict ,src ,dest )
    snake_case_ : Tuple = rename_backbone_keys(state_dict )
# query, key and value matrices need special treatment
    read_in_q_k_v(state_dict ,is_panoptic=is_panoptic )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case_ : int = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
                snake_case_ : Any = state_dict.pop(key )
snake_case_ : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
                snake_case_ : Tuple = state_dict.pop(key )
snake_case_ : Any = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
                snake_case_ : Union[str, Any] = state_dict.pop(key )
snake_case_ : List[Any] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
            snake_case_ : Any = state_dict.pop(key )
snake_case_ : List[Any] = val
# finally, create HuggingFace model and load state dict
    snake_case_ : Optional[int] = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
model.eval()
    model.push_to_hub(repo_id=model_name ,organization="DepuMeng" ,commit_message="Add model" )
# verify our conversion
    snake_case_ : Dict = conditional_detr(pixel_values )
    snake_case_ : Union[str, Any] = model(pixel_values )
assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-4 )
assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__lowerCamelCase : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 656 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class A_ (a_ , a_ ):
"""simple docstring"""
a__ = '''convnextv2'''
def __init__( self :str , lowerCAmelCase__ :str=3 , lowerCAmelCase__ :Optional[Any]=4 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[Any]="gelu" , lowerCAmelCase__ :Optional[Any]=0.0_2 , lowerCAmelCase__ :List[Any]=1E-1_2 , lowerCAmelCase__ :int=0.0 , lowerCAmelCase__ :Tuple=224 , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :Optional[Any] , ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : Optional[Any] = num_channels
snake_case_ : Any = patch_size
snake_case_ : Dict = num_stages
snake_case_ : Tuple = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
snake_case_ : List[Any] = [3, 3, 9, 3] if depths is None else depths
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Dict = layer_norm_eps
snake_case_ : Union[str, Any] = drop_path_rate
snake_case_ : Union[str, Any] = image_size
snake_case_ : List[str] = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
snake_case_ : int = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
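# Construction sketch (ConvNextV2Config is the released name of this config class):
#   config = ConvNextV2Config(depths=[3, 3, 9, 3], hidden_sizes=[96, 192, 384, 768])
# With out_features/out_indices left as None, the backbone helper defaults to the last stage.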
| 719 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = 1
snake_case_ : Dict = 3
snake_case_ : Union[str, Any] = (32, 32)
snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
        snake_case_ : List[str] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _A ( self :Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = torch.ones([0] )
def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : str = self.dummy_cond_unet
snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : Dict = self.dummy_vae
snake_case_ : Dict = self.dummy_text_encoder
snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : str = 77
snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
snake_case_ : Tuple = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
        snake_case_ : Optional[Any] = AltDiffusionImg2ImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = "A painting of a squirrel eating a burger"
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Dict = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
snake_case_ : Any = output.images
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Optional[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.dummy_cond_unet
snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : int = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : int = 77
snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ : Optional[Any] = unet.half()
snake_case_ : Tuple = vae.half()
snake_case_ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
        snake_case_ : Optional[int] = AltDiffusionImg2ImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : str = init_image.resize((760, 504) )
snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
        snake_case_ : int = AltDiffusionImg2ImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images[0]
snake_case_ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : List[Any] = init_image.resize((768, 512) )
snake_case_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
snake_case_ : Any = "BAAI/AltDiffusion"
        snake_case_ : List[str] = AltDiffusionImg2ImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
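# Note on img2img: strength in [0, 1] controls how much noise is added to the init image's
# latents; strength=0.75 means the pipeline only runs roughly the last three-quarters of the
# scheduler's denoising steps on those noised latents.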
| 656 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
snake_case_ : int = Dataset.from_dict(__magic_name__ )
return dataset
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_dataset()
snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = get_dataset()
snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
print(lowerCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
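# Note: with the 0.85 Jaccard threshold used above, the two "a "-repetition documents fall
# into one MinHash cluster, so deduplication shrinks the toy dataset from 3 rows to 2.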
| 720 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = ZeroShotClassificationPipeline(
            model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["politics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# No kwarg
snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : str = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# https://github.com/huggingface/transformers/issues/13846
snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(1 )
] , )
snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(2 )
] , )
with self.assertRaises(lowerCAmelCase__ ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier(lowerCAmelCase__ , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , )
self.run_entailment_id(lowerCAmelCase__ )
def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = zero_shot_classifier.model.config
        snake_case_ : Optional[int] = config.label2id
snake_case_ : Tuple = zero_shot_classifier.entailment_id
snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        snake_case_ : List[str] = original_label2id
self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id )
@require_torch
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
snake_case_ : int = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
snake_case_ : Optional[int] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
snake_case_ : str = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Optional[int] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
snake_case_ : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Tuple = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
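# Illustrative sketch (added; not part of the test suite above): the minimal
# end-to-end call these tests exercise. "roberta-large-mnli" is the checkpoint
# used by the slow tests; any NLI model fine-tuned on entailment data works the
# same way. This assumes network access to download the weights.
def _example_zero_shot_usage():
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    result = classifier(
        "Who are you voting for in 2020?",
        candidate_labels=["politics", "public health", "science"],
    )
    # Single-label mode: labels come back sorted by descending score and the
    # scores are softmax-normalized, which is why the tests assert they sum to
    # 1.0. With multi_label=True each candidate is scored independently instead.
    return result["labels"][0], result["scores"][0]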
| 656 | 0 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] , lowerCAmelCase__ :List[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = str(id_ )
snake_case_ : Union[str, Any] = None
snake_case_ : Union[str, Any] = None
snake_case_ : Optional[Any] = []
snake_case_ : List[str] = {} # {vertex:distance}
def __lt__( self :List[Any] , lowerCAmelCase__ :Tuple ) -> List[str]:
'''simple docstring'''
return self.key < other.key
def __repr__( self :Optional[Any] ) -> Dict:
'''simple docstring'''
return self.id
def _A ( self :List[Any] , lowerCAmelCase__ :str ) -> Dict:
'''simple docstring'''
self.neighbors.append(lowerCAmelCase__ )
def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = weight
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Any:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] ,__magic_name__ )
graph[b - 1].add_edge(graph[a - 1] ,__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list:
"""simple docstring"""
snake_case_ : Union[str, Any] = []
for u in graph:
snake_case_ : Optional[int] = math.inf
snake_case_ : Union[str, Any] = None
snake_case_ : List[Any] = 0
snake_case_ : Optional[Any] = graph[:]
while q:
snake_case_ : List[Any] = min(__magic_name__ )
q.remove(__magic_name__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
snake_case_ : Any = u
snake_case_ : int = u.edges[v.id]
for i in range(1 ,len(__magic_name__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Iterator[tuple]:
"""simple docstring"""
for u in graph:
snake_case_ : Any = math.inf
snake_case_ : str = None
snake_case_ : List[str] = 0
snake_case_ : Dict = list(__magic_name__ )
hq.heapify(__magic_name__ )
while h:
snake_case_ : Union[str, Any] = hq.heappop(__magic_name__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
snake_case_ : Tuple = u
snake_case_ : Any = u.edges[v.id]
hq.heapify(__magic_name__ )
for i in range(1 ,len(__magic_name__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
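# Hedged sketch (added for illustration): the same Prim's-algorithm idea as the
# two functions above, on a plain adjacency dict and with a lazy-deletion heap,
# which avoids re-heapifying after every key update. The dict format and names
# below are made up for this example.
def _example_prim_lazy_heap(adj, start):
    # adj maps node -> list of (weight, neighbor) pairs
    import heapq

    mst, seen = [], {start}
    heap = [(w, start, v) for w, v in adj[start]]
    heapq.heapify(heap)
    while heap and len(seen) < len(adj):
        w, u, v = heapq.heappop(heap)
        if v in seen:
            continue  # stale entry; v was already reached via a cheaper edge
        seen.add(v)
        mst.append((u, v, w))
        for nw, nv in adj[v]:
            if nv not in seen:
                heapq.heappush(heap, (nw, v, nv))
    return mst
# _example_prim_lazy_heap({1: [(1, 2), (3, 3)], 2: [(1, 1), (1, 3)],
#                          3: [(3, 1), (1, 2)]}, 1) -> [(1, 2, 1), (2, 3, 1)]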
def __UpperCAmelCase ( )-> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
roberta.eval() # disable dropout
snake_case_ : Dict = roberta.model.encoder.sentence_encoder
snake_case_ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" ,__magic_name__ )
snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
snake_case_ : int = roberta_sent_encoder.embed_positions.weight
snake_case_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
snake_case_ : str = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case_ : BertLayer = model.roberta.encoder.layer[i]
snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
snake_case_ : RobertaAttention = layer.attention
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight
snake_case_ : Any = roberta_layer.self_attn.q_proj.bias
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight
snake_case_ : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight
snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case_ : int = roberta_layer.final_layer_norm.weight
snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias
# intermediate
snake_case_ : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        snake_case_ : List[str] = roberta_layer.fc1.weight
        snake_case_ : List[Any] = roberta_layer.fc1.bias
        # output
        snake_case_ : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        snake_case_ : Any = roberta_layer.fc2.weight
        snake_case_ : Any = roberta_layer.fc2.bias
# end of layer
if classification_head:
snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight
snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
snake_case_ : int = roberta.model.encoder.lm_head.dense.bias
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias
snake_case_ : int = roberta.model.encoder.lm_head.weight
snake_case_ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case_ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 ) # batch of size 1
snake_case_ : Union[str, Any] = model(__magic_name__ )[0]
if classification_head:
snake_case_ : Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(__magic_name__ ) )
else:
snake_case_ : List[str] = roberta.model(__magic_name__ )[0]
print(our_output.shape ,their_output.shape )
snake_case_ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
snake_case_ : Any = torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 )
print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
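# Hedged follow-up sketch (added; not part of the conversion script): once the
# script has run, the dumped folder reloads like any Transformers checkpoint.
# The path below is a placeholder.
def _example_load_converted(dump_dir="path/to/converted-xlm-roberta-xl"):
    from transformers import XLMRobertaXLForMaskedLM

    model = XLMRobertaXLForMaskedLM.from_pretrained(dump_dir)
    model.eval()  # inference mode, matching the parity check performed above
    return model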
| 656 | 0 |
'''simple docstring'''
from __future__ import annotations
__lowerCamelCase : List[Any] = [True] * 1000001
__lowerCamelCase : List[Any] = 2
while i * i <= 1000000:
    if sieve[i]:
for j in range(i * i, 1000001, i):
__lowerCamelCase : Tuple = False
i += 1
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
    return sieve[n]
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
return any(digit in "02468" for digit in str(__magic_name__ ) )
def __UpperCAmelCase ( __magic_name__ = 100_0000 )-> list[int]:
"""simple docstring"""
snake_case_ : Optional[int] = [2] # result already includes the number 2.
for num in range(3 ,limit + 1 ,2 ):
if is_prime(__magic_name__ ) and not contains_an_even_digit(__magic_name__ ):
snake_case_ : List[str] = str(__magic_name__ )
snake_case_ : Optional[int] = [int(str_num[j:] + str_num[:j] ) for j in range(len(__magic_name__ ) )]
if all(is_prime(__magic_name__ ) for i in list_nums ):
result.append(__magic_name__ )
return result
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
return len(find_circular_primes() )
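# Hedged illustration (added): why the even-digit filter above is sound. Any
# rotation of a number that contains an even digit eventually places that digit
# last, making that rotation divisible by 2, so the number cannot be a circular
# prime. The rotation construction mirrors the one used above.
def _example_rotations(num=197):
    s = str(num)
    # 197 -> [197, 971, 719]; all three are prime, so 197 is circular.
    return [int(s[j:] + s[:j]) for j in range(len(s))]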
if __name__ == "__main__":
print(f'''{len(find_circular_primes()) = }''')
| 700 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = False
snake_case_ : int = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
snake_case_ : Any = True
elif "IPython" in sys.modules:
snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
snake_case_ : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__magic_name__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
with patch_environment(
                world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ )
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
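# Hedged usage sketch (added for illustration): how notebook_launcher is
# typically invoked from a Jupyter cell. The training function and learning
# rate below are placeholders; the constraint enforced above is that the
# Accelerator must be created *inside* the launched function.
def _example_notebook_launch():
    from accelerate import Accelerator, notebook_launcher

    def training_loop(lr):
        accelerator = Accelerator()  # created inside the spawned process
        accelerator.print(f"training with lr={lr}")

    notebook_launcher(training_loop, args=(1e-3,), num_processes=2)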
| 656 | 0 |
'''simple docstring'''
from __future__ import annotations
__lowerCamelCase : Optional[Any] = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class A_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCAmelCase__ :dict[str, list[str]] , lowerCAmelCase__ :str ) -> None:
'''simple docstring'''
snake_case_ : List[str] = graph
# mapping node to its parent in resulting breadth first tree
snake_case_ : dict[str, str | None] = {}
snake_case_ : Dict = source_vertex
def _A ( self :Optional[int] ) -> None:
'''simple docstring'''
snake_case_ : int = {self.source_vertex}
snake_case_ : Dict = None
snake_case_ : Tuple = [self.source_vertex] # first in first out queue
while queue:
snake_case_ : Dict = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCAmelCase__ )
snake_case_ : List[Any] = vertex
queue.append(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
snake_case_ : Any = self.parent.get(lowerCAmelCase__ )
if target_vertex_parent is None:
snake_case_ : Optional[Any] = (
F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(lowerCAmelCase__ )
return self.shortest_path(lowerCAmelCase__ ) + F'''->{target_vertex}'''
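# Hedged sketch (added for illustration): the recursive shortest_path above can
# also be written iteratively by walking the parent map back from the target
# and reversing, which avoids recursion-depth limits on very long paths.
def _example_iterative_path(parent, source, target):
    path = [target]
    while path[-1] != source:
        step = parent.get(path[-1])
        if step is None:
            raise ValueError(f"No path from vertex: {source} to vertex: {target}")
        path.append(step)
    return "->".join(reversed(path))
# With the parent map produced by the breadth-first search above:
# _example_iterative_path(g.parent, "A", "D") -> "A->B->D"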
if __name__ == "__main__":
__lowerCamelCase : Tuple = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 701 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ : Optional[int] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
snake_case_ : Dict = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
snake_case_ : str = []
snake_case_ : Optional[int] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Dict = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int:
'''simple docstring'''
if c == -1:
snake_case_ : Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[Any] = time()
return end - begin
def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Any = time()
return end - begin
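# Hedged sketch (added for illustration): the stack-based topological sort in
# the directed-graph class above, restated as Kahn's algorithm (repeatedly
# removing zero in-degree nodes) over a plain adjacency dict. Names below are
# made up for this example.
def _example_topological_sort(adj):
    from collections import deque

    indegree = {u: 0 for u in adj}
    for u in adj:
        for v in adj[u]:
            indegree[v] = indegree.get(v, 0) + 1
    queue = deque(u for u, d in indegree.items() if d == 0)
    order = []
    while queue:
        u = queue.popleft()
        order.append(u)
        for v in adj.get(u, []):
            indegree[v] -= 1
            if indegree[v] == 0:
                queue.append(v)
    return order  # shorter than len(indegree) iff the graph contains a cycle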
class A_ :
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case_ : str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
snake_case_ : List[str] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
snake_case_ : List[str] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
snake_case_ : List[str] = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Tuple = time()
return end - begin
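# Hedged sketch (added for illustration): the explicit-stack cycle detection in
# the classes above, expressed with the classic white/grey/black DFS colouring
# on an adjacency dict for a directed graph. Names below are made up.
def _example_has_cycle(adj):
    WHITE, GREY, BLACK = 0, 1, 2
    colour = {u: WHITE for u in adj}

    def visit(u):
        colour[u] = GREY  # on the current DFS path
        for v in adj.get(u, []):
            if colour.get(v, WHITE) == GREY:  # back edge -> cycle
                return True
            if colour.get(v, WHITE) == WHITE and visit(v):
                return True
        colour[u] = BLACK  # fully explored
        return False

    return any(colour[u] == WHITE and visit(u) for u in adj)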
| 656 | 0 |
'''simple docstring'''
__lowerCamelCase : Any = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowerCamelCase : Tuple = concatenate_datasets
__lowerCamelCase : Optional[Any] = DownloadConfig
__lowerCamelCase : str = DownloadManager
__lowerCamelCase : Optional[Any] = DownloadMode
__lowerCamelCase : Dict = DownloadConfig
__lowerCamelCase : Tuple = DownloadMode
__lowerCamelCase : int = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
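# Hedged usage sketch (added for illustration): the re-exports above make the
# canonical entry points below available. The dataset and config names are
# placeholders for whatever is on the Hub.
def _example_datasets_usage():
    from datasets import concatenate_datasets, load_dataset

    train = load_dataset("glue", "mrpc", split="train")
    validation = load_dataset("glue", "mrpc", split="validation")
    return concatenate_datasets([train, validation])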
| 702 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCamelCase : List[str] = re.compile(R'''\s+''')
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__magic_name__ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
    snake_case_ : Optional[Any] = [len(line ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(__magic_name__ ), "line_max": max(__magic_name__ )}
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
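# Hedged illustration (added): how the uniques-set trick above keeps exactly
# the first occurrence of every hash when check_uniques is used as a streaming
# filter. The hash values below are made up.
def _example_first_occurrence_filter():
    hashes = ["a", "b", "a", "c", "b"]
    uniques = set(hashes)
    kept = []
    for h in hashes:
        if h in uniques:       # first time this hash is seen
            uniques.remove(h)  # later duplicates will be dropped
            kept.append(h)
    return kept  # -> ["a", "b", "c"]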
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = ["auto-generated", "autogenerated", "automatically generated"]
snake_case_ : Optional[Any] = example["content"].splitlines()
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 ,__magic_name__=0.05 )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = ["unit tests", "test file", "configuration file"]
snake_case_ : int = example["content"].splitlines()
snake_case_ : Optional[Any] = 0
snake_case_ : Any = 0
# first test
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
snake_case_ : Tuple = example["content"].count("\n" )
snake_case_ : int = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : List[Any] = ["def ", "class ", "for ", "while "]
snake_case_ : Optional[Any] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=4 )-> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = example["content"].splitlines()
snake_case_ : Tuple = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = tokenizer(example["content"] ,truncation=__magic_name__ )["input_ids"]
snake_case_ : int = len(example["content"] ) / len(__magic_name__ )
return {"ratio": ratio}
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {}
results.update(get_hash(__magic_name__ ) )
results.update(line_stats(__magic_name__ ) )
results.update(alpha_stats(__magic_name__ ) )
results.update(char_token_ratio(__magic_name__ ) )
results.update(is_autogenerated(__magic_name__ ) )
results.update(is_config_or_test(__magic_name__ ) )
results.update(has_no_keywords(__magic_name__ ) )
results.update(has_few_assignments(__magic_name__ ) )
return results
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if not check_uniques(__magic_name__ ,__magic_name__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
with open(__magic_name__ ,"rb" ) as f_in:
with gzip.open(str(__magic_name__ ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out:
shutil.copyfileobj(__magic_name__ ,__magic_name__ )
os.unlink(__magic_name__ )
# Settings
__lowerCamelCase : List[Any] = HfArgumentParser(PreprocessingArguments)
__lowerCamelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCamelCase : List[Any] = multiprocessing.cpu_count()
__lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__lowerCamelCase : Any = time.time()
__lowerCamelCase : str = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Any = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__lowerCamelCase : Any = set(ds.unique('''hash'''))
__lowerCamelCase : Optional[int] = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase : Tuple = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__lowerCamelCase : List[str] = time.time()
__lowerCamelCase , __lowerCamelCase : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__lowerCamelCase : List[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
__lowerCamelCase : List[str] = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
__lowerCamelCase : int = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__lowerCamelCase : Union[str, Any] = str(data_dir / f'''file-{file_number+1:012}.json''')
__lowerCamelCase : List[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
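# Hedged follow-up sketch (added; not part of the preprocessing script): the
# gzip-compressed JSON-lines shards written above can be read back with the
# json loader, which (in recent datasets versions) transparently decompresses
# .gz files. The glob below is a placeholder matching the naming scheme used
# when saving.
def _example_reload_shards(data_dir="data"):
    from datasets import load_dataset

    return load_dataset("json", data_files=f"{data_dir}/file-*.json.gz", split="train")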
| 656 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> list[int]:
"""simple docstring"""
if num <= 0:
raise ValueError("Input must be a positive integer" )
snake_case_ : Union[str, Any] = [True] * (num + 1)
snake_case_ : Any = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , __magic_name__ ):
snake_case_ : Tuple = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
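# Hedged illustration (added): why the inner loop above can start at p * p --
# every smaller composite multiple of p has a prime factor below p and was
# already marked in an earlier pass. The helper records which pass first marks
# each composite; the sieve itself returns [2, 3, 5, 7] for num = 10.
def _example_sieve_passes(num=10):
    marked = {}
    p = 2
    while p * p <= num:
        for i in range(p * p, num + 1, p):
            marked.setdefault(i, p)  # pass that first marked composite i
        p += 1
    return marked  # for num = 10: {4: 2, 6: 2, 8: 2, 10: 2, 9: 3}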
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase : Optional[int] = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
| 703 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = torch.nn.Linear(10 , 10 )
snake_case_ : Dict = torch.optim.SGD(model.parameters() , 0.1 )
snake_case_ : Tuple = Accelerator()
snake_case_ : Optional[Any] = accelerator.prepare(lowerCAmelCase__ )
try:
pickle.loads(pickle.dumps(lowerCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
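# Hedged note (added for illustration): the property the test above guards is
# the plain pickle round-trip below; Accelerate wraps the optimizer in an
# AcceleratedOptimizer, and that wrapper must not capture unpicklable state.
def _example_pickle_roundtrip(obj):
    import pickle

    return pickle.loads(pickle.dumps(obj))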
| 656 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''speech_to_text'''
a__ = ['''past_key_values''']
a__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=10_000 , lowerCAmelCase__ :Optional[Any]=12 , lowerCAmelCase__ :Tuple=2_048 , lowerCAmelCase__ :Optional[int]=4 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Union[str, Any]=2_048 , lowerCAmelCase__ :Optional[int]=4 , lowerCAmelCase__ :int=0.0 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :str="relu" , lowerCAmelCase__ :Dict=256 , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :List[str]=0.0 , lowerCAmelCase__ :List[Any]=0.0 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[Any]=1 , lowerCAmelCase__ :Dict=0 , lowerCAmelCase__ :Union[str, Any]=2 , lowerCAmelCase__ :Optional[int]=6_000 , lowerCAmelCase__ :int=1_024 , lowerCAmelCase__ :Union[str, Any]=2 , lowerCAmelCase__ :List[Any]=(5, 5) , lowerCAmelCase__ :Optional[Any]=1_024 , lowerCAmelCase__ :Union[str, Any]=80 , lowerCAmelCase__ :int=1 , **lowerCAmelCase__ :Tuple , ) -> Dict:
'''simple docstring'''
snake_case_ : int = vocab_size
snake_case_ : str = d_model
snake_case_ : Optional[Any] = encoder_ffn_dim
snake_case_ : Union[str, Any] = encoder_layers
snake_case_ : str = encoder_attention_heads
snake_case_ : Optional[int] = decoder_ffn_dim
snake_case_ : Optional[int] = decoder_layers
snake_case_ : Optional[Any] = decoder_attention_heads
snake_case_ : int = dropout
snake_case_ : List[Any] = attention_dropout
snake_case_ : Optional[int] = activation_dropout
snake_case_ : List[Any] = activation_function
snake_case_ : Optional[int] = init_std
snake_case_ : str = encoder_layerdrop
snake_case_ : Optional[int] = decoder_layerdrop
snake_case_ : List[Any] = use_cache
snake_case_ : List[str] = encoder_layers
snake_case_ : str = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case_ : Dict = max_source_positions
snake_case_ : Optional[int] = max_target_positions
snake_case_ : Dict = num_conv_layers
snake_case_ : Tuple = list(lowerCAmelCase__ )
snake_case_ : str = conv_channels
snake_case_ : int = input_feat_per_channel
snake_case_ : List[str] = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
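# Hedged usage sketch (added for illustration): constructing a deliberately
# tiny variant of this config and a randomly initialised model from it. All
# sizes below are made up and far smaller than the released checkpoints.
def _example_tiny_s2t_model():
    from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration

    config = Speech2TextConfig(
        vocab_size=1000,
        d_model=64,
        encoder_layers=2,
        decoder_layers=2,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),  # length must equal num_conv_layers (checked above)
    )
    return Speech2TextForConditionalGeneration(config)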
| 704 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : Any = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__lowerCamelCase : Any = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = None
# source code of `config_class`
snake_case_ : List[Any] = inspect.getsource(__magic_name__ )
snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
snake_case_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ : str = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case_ : Dict = ckpt_name
break
return checkpoint
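# Hedged illustration (added): what the checkpoint regex extracts from a
# typical config docstring. The docstring fragment below is made up but follows
# the [name](https://huggingface.co/name) convention this script enforces.
def _example_checkpoint_regex():
    import re

    pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
    doc = "...similar configuration to that of the [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture."
    return pattern.findall(doc)
    # -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]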
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ )
snake_case_ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__magic_name__ )
if len(__magic_name__ ) > 0:
snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 656 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
__lowerCamelCase : List[str] = logging.get_logger(__name__)
class A_ (a_ ):
"""simple docstring"""
def __init__( self :List[str] , *lowerCAmelCase__ :Dict , **lowerCAmelCase__ :Optional[Any] ) -> None:
'''simple docstring'''
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 705 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''cvt'''
def __init__( self :List[Any] , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Any=[7, 3, 3] , lowerCAmelCase__ :Dict=[4, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[2, 1, 1] , lowerCAmelCase__ :Any=[64, 192, 384] , lowerCAmelCase__ :List[str]=[1, 3, 6] , lowerCAmelCase__ :str=[1, 2, 10] , lowerCAmelCase__ :Any=[4.0, 4.0, 4.0] , lowerCAmelCase__ :int=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Optional[Any]=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Dict=[0.0, 0.0, 0.1] , lowerCAmelCase__ :List[Any]=[True, True, True] , lowerCAmelCase__ :List[Any]=[False, False, True] , lowerCAmelCase__ :Dict=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase__ :Any=[3, 3, 3] , lowerCAmelCase__ :Tuple=[1, 1, 1] , lowerCAmelCase__ :Optional[int]=[2, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[1, 1, 1] , lowerCAmelCase__ :Any=[1, 1, 1] , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Dict=1E-1_2 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : int = num_channels
snake_case_ : int = patch_sizes
snake_case_ : Optional[Any] = patch_stride
snake_case_ : Dict = patch_padding
snake_case_ : Tuple = embed_dim
snake_case_ : Optional[int] = num_heads
snake_case_ : Union[str, Any] = depth
snake_case_ : Optional[int] = mlp_ratio
snake_case_ : Tuple = attention_drop_rate
snake_case_ : str = drop_rate
snake_case_ : Tuple = drop_path_rate
snake_case_ : Any = qkv_bias
snake_case_ : Union[str, Any] = cls_token
snake_case_ : int = qkv_projection_method
snake_case_ : Any = kernel_qkv
snake_case_ : Union[str, Any] = padding_kv
snake_case_ : str = stride_kv
snake_case_ : Dict = padding_q
snake_case_ : Tuple = stride_q
snake_case_ : Any = initializer_range
snake_case_ : Any = layer_norm_eps
| 656 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _A ( self :str , lowerCAmelCase__ :Tuple=0 ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCAmelCase__ ) )
snake_case_ : Dict = np.random.RandomState(lowerCAmelCase__ )
snake_case_ : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.7_5,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
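    # The tests below run the tiny ONNX img2img pipeline with different
    # schedulers and compare a fixed output slice against reference values.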
def _A ( self :Dict ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[str] = self.get_dummy_inputs()
snake_case_ : Optional[int] = pipe(**lowerCAmelCase__ ).images
snake_case_ : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case_ : List[str] = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _A ( self :str ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case_ : List[str] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = self.get_dummy_inputs()
snake_case_ : Tuple = pipe(**lowerCAmelCase__ ).images
snake_case_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ : Tuple = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case_ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
# warmup pass to apply optimizations
snake_case_ : Dict = pipe(**self.get_dummy_inputs() )
snake_case_ : List[str] = self.get_dummy_inputs()
snake_case_ : Optional[int] = pipe(**lowerCAmelCase__ ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ : List[str] = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _A ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Any = self.get_dummy_inputs()
snake_case_ : Union[str, Any] = pipe(**lowerCAmelCase__ ).images
snake_case_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ : Union[str, Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
snake_case_ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[str] = self.get_dummy_inputs()
snake_case_ : int = pipe(**lowerCAmelCase__ ).images
snake_case_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ : Optional[Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _A ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Tuple = self.get_dummy_inputs()
snake_case_ : Tuple = pipe(**lowerCAmelCase__ ).images
snake_case_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ : Tuple = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
@property
def _A ( self :Any ) -> Optional[int]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _A ( self :Optional[int] ) -> str:
'''simple docstring'''
snake_case_ : Any = ort.SessionOptions()
snake_case_ : Tuple = False
return options
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : List[Any] = init_image.resize((768, 512) )
# using the PNDM scheduler by default
snake_case_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Optional[Any] = "A fantasy landscape, trending on artstation"
snake_case_ : Optional[Any] = np.random.RandomState(0 )
snake_case_ : Dict = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images
snake_case_ : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ : Dict = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : Tuple = init_image.resize((768, 512) )
snake_case_ : int = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
snake_case_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : int = "A fantasy landscape, trending on artstation"
snake_case_ : Dict = np.random.RandomState(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Any = output.images
snake_case_ : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ : Dict = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 706 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__lowerCamelCase : str = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCamelCase : Dict = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format: the lengths of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__lowerCamelCase : int = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _A ( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]:
'''simple docstring'''
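        # Sacrebleu scores a corpus with one list per reference stream, so after
        # checking that every prediction has the same number of references we
        # transpose the per-prediction reference lists into per-stream lists.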
snake_case_ : List[str] = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
snake_case_ : List[str] = TER(
normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , )
snake_case_ : Any = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 656 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = KandinskyVaaInpaintPipeline
a__ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
a__ = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
a__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''num_images_per_prompt''',
        '''output_type''',
]
a__ = False
@property
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
return 32
@property
def _A ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
return self.time_input_dim
@property
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
return 100
@property
def _A ( self :str ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : int = {
"in_channels": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
snake_case_ : Any = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _A ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.dummy_unet
snake_case_ : List[Any] = self.dummy_movq
snake_case_ : Dict = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCAmelCase__ , )
snake_case_ : Any = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _A ( self :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=0 ) -> List[str]:
'''simple docstring'''
snake_case_ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCAmelCase__ )
# create init_image
snake_case_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
snake_case_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Tuple = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((256, 256) )
# create mask
snake_case_ : Dict = np.ones((64, 64) , dtype=np.floataa )
snake_case_ : Union[str, Any] = 0
if str(lowerCAmelCase__ ).startswith("mps" ):
snake_case_ : Dict = torch.manual_seed(lowerCAmelCase__ )
else:
snake_case_ : str = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = {
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
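        # The dict-style and tuple-style (`return_dict=False`) pipeline outputs
        # must both match a fixed reference slice on CPU.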
snake_case_ : List[str] = "cpu"
snake_case_ : Optional[int] = self.get_dummy_components()
snake_case_ : Optional[int] = self.pipeline_class(**lowerCAmelCase__ )
snake_case_ : List[str] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Optional[Any] = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
snake_case_ : Any = output.images
snake_case_ : Dict = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Dict = image[0, -3:, -3:, -1]
snake_case_ : List[str] = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
snake_case_ : List[str] = np.array(
[0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _A ( self :str ) -> Dict:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :int ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :Optional[int] ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" )
snake_case_ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
snake_case_ : int = np.ones((768, 768) , dtype=np.floataa )
snake_case_ : Optional[Any] = 0
snake_case_ : Dict = "a hat"
snake_case_ : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
snake_case_ : Optional[int] = KandinskyVaaInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.floataa )
snake_case_ : Union[str, Any] = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Any = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case_ : Union[str, Any] = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
snake_case_ : List[Any] = pipeline(
image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 707 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
snake_case_ : int = Dataset.from_dict(__magic_name__ )
return dataset
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_dataset()
snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = get_dataset()
snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
print(lowerCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
| 656 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''conditional_detr'''
a__ = ['''past_key_values''']
a__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self :List[Any] , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :Dict=300 , lowerCAmelCase__ :List[Any]=6 , lowerCAmelCase__ :Any=2_048 , lowerCAmelCase__ :Any=8 , lowerCAmelCase__ :Union[str, Any]=6 , lowerCAmelCase__ :Tuple=2_048 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Dict=0.0 , lowerCAmelCase__ :Any=0.0 , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[Any]="relu" , lowerCAmelCase__ :Tuple=256 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :int=0.0 , lowerCAmelCase__ :Optional[Any]=0.0 , lowerCAmelCase__ :Optional[int]=0.0_2 , lowerCAmelCase__ :Dict=1.0 , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[Any]="sine" , lowerCAmelCase__ :Any="resnet50" , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :Tuple=2 , lowerCAmelCase__ :List[Any]=5 , lowerCAmelCase__ :Optional[int]=2 , lowerCAmelCase__ :Optional[Any]=1 , lowerCAmelCase__ :List[str]=1 , lowerCAmelCase__ :Union[str, Any]=2 , lowerCAmelCase__ :Tuple=5 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :List[str]=0.2_5 , **lowerCAmelCase__ :Tuple , ) -> Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
snake_case_ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : Optional[Any] = backbone_config.get("model_type" )
snake_case_ : Tuple = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[str] = config_class.from_dict(lowerCAmelCase__ )
snake_case_ : int = use_timm_backbone
snake_case_ : Union[str, Any] = backbone_config
snake_case_ : int = num_channels
snake_case_ : int = num_queries
snake_case_ : List[Any] = d_model
snake_case_ : int = encoder_ffn_dim
snake_case_ : str = encoder_layers
snake_case_ : str = encoder_attention_heads
snake_case_ : List[Any] = decoder_ffn_dim
snake_case_ : Union[str, Any] = decoder_layers
snake_case_ : List[Any] = decoder_attention_heads
snake_case_ : List[Any] = dropout
snake_case_ : int = attention_dropout
snake_case_ : str = activation_dropout
snake_case_ : int = activation_function
snake_case_ : str = init_std
snake_case_ : Union[str, Any] = init_xavier_std
snake_case_ : Optional[Any] = encoder_layerdrop
snake_case_ : str = decoder_layerdrop
snake_case_ : Tuple = encoder_layers
snake_case_ : Dict = auxiliary_loss
snake_case_ : str = position_embedding_type
snake_case_ : Dict = backbone
snake_case_ : Union[str, Any] = use_pretrained_backbone
snake_case_ : Tuple = dilation
# Hungarian matcher
snake_case_ : int = class_cost
snake_case_ : Optional[int] = bbox_cost
snake_case_ : int = giou_cost
# Loss coefficients
snake_case_ : Optional[int] = mask_loss_coefficient
snake_case_ : Dict = dice_loss_coefficient
snake_case_ : Optional[int] = cls_loss_coefficient
snake_case_ : Any = bbox_loss_coefficient
snake_case_ : List[Any] = giou_loss_coefficient
snake_case_ : List[str] = focal_alpha
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def _A ( self :Dict ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
return self.d_model
def _A ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
snake_case_ : Union[str, Any] = self.backbone_config.to_dict()
snake_case_ : str = self.__class__.model_type
return output
class A_ (a_ ):
"""simple docstring"""
a__ = version.parse('''1.11''' )
@property
def _A ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _A ( self :Dict ) -> float:
'''simple docstring'''
return 1E-5
@property
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
return 12
| 708 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase : Dict = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , a_ , )
class A_ (a_ ):
"""simple docstring"""
a__ = RobertaConfig
a__ = '''roberta'''
def __init__( self :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
snake_case_ : int = RobertaEmbeddings(lowerCAmelCase__ )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , a_ , )
class A_ (a_ ):
"""simple docstring"""
a__ = RobertaConfig
a__ = '''roberta'''
def __init__( self :Union[str, Any] , lowerCAmelCase__ :int ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
snake_case_ : Tuple = config.num_labels
snake_case_ : List[str] = config.num_hidden_layers
snake_case_ : Optional[int] = DeeRobertaModel(lowerCAmelCase__ )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Optional[int] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :List[Any]=-1 , lowerCAmelCase__ :Union[str, Any]=False , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.num_layers
try:
snake_case_ : Optional[Any] = self.roberta(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , )
snake_case_ : str = outputs[1]
snake_case_ : Any = self.dropout(lowerCAmelCase__ )
snake_case_ : str = self.classifier(lowerCAmelCase__ )
snake_case_ : Optional[int] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : str = e.message
snake_case_ : Any = e.exit_layer
snake_case_ : List[Any] = outputs[0]
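        # At inference time, compute the entropy of the final logits; it is
        # returned below together with the per-highway entropies and the exit layer.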
if not self.training:
snake_case_ : Tuple = entropy(lowerCAmelCase__ )
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[Any] = MSELoss()
snake_case_ : str = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Tuple = CrossEntropyLoss()
snake_case_ : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Union[str, Any] = []
for highway_exit in outputs[-1]:
snake_case_ : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(lowerCAmelCase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Tuple = MSELoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Dict = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowerCAmelCase__ )
if train_highway:
snake_case_ : Any = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : Union[str, Any] = (loss,) + outputs
if not self.training:
snake_case_ : Tuple = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : Union[str, Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 709 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> list[int]:
"""simple docstring"""
    if not isinstance(__magic_name__ ,__magic_name__ ) or length <= 0:
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(__magic_name__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 656 | 0 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def __UpperCAmelCase ( )-> Optional[Any]:
"""simple docstring"""
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"-m" ,"--pretrained_model_name_or_path" ,type=__magic_name__ ,default=__magic_name__ ,required=__magic_name__ ,help="Path to pretrained model or model identifier from huggingface.co/models." ,)
parser.add_argument(
"-c" ,"--caption" ,type=__magic_name__ ,default="robotic cat with wings" ,help="Text used to generate images." ,)
parser.add_argument(
"-n" ,"--images_num" ,type=__magic_name__ ,default=4 ,help="How much images to generate." ,)
parser.add_argument(
"-s" ,"--seed" ,type=__magic_name__ ,default=42 ,help="Seed for random process." ,)
parser.add_argument(
"-ci" ,"--cuda_id" ,type=__magic_name__ ,default=0 ,help="cuda_id." ,)
snake_case_ : Union[str, Any] = parser.parse_args()
return args
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[int]:
"""simple docstring"""
if not len(__magic_name__ ) == rows * cols:
raise ValueError("The specified number of rows and columns are not correct." )
snake_case_ : Optional[Any] = imgs[0].size
snake_case_ : int = Image.new("RGB" ,size=(cols * w, rows * h) )
snake_case_ : Any = grid.size
for i, img in enumerate(__magic_name__ ):
grid.paste(__magic_name__ ,box=(i % cols * w, i // cols * h) )
return grid
def __UpperCAmelCase ( __magic_name__ ,__magic_name__="robotic cat with wings" ,__magic_name__=7.5 ,__magic_name__=50 ,__magic_name__=1 ,__magic_name__=42 ,)-> Optional[Any]:
"""simple docstring"""
snake_case_ : List[str] = torch.Generator(pipeline.device ).manual_seed(__magic_name__ )
snake_case_ : Any = pipeline(
__magic_name__ ,guidance_scale=__magic_name__ ,num_inference_steps=__magic_name__ ,generator=__magic_name__ ,num_images_per_prompt=__magic_name__ ,).images
snake_case_ : Optional[Any] = int(math.sqrt(__magic_name__ ) )
snake_case_ : Dict = image_grid(__magic_name__ ,rows=_rows ,cols=num_images_per_prompt // _rows )
return grid, images
__lowerCamelCase : Dict = parse_args()
# Load models and create wrapper for stable diffusion
__lowerCamelCase : Any = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
__lowerCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
__lowerCamelCase : List[str] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
__lowerCamelCase : Union[str, Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
__lowerCamelCase : str = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
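# Replace the safety checker with a pass-through so no generated image is filtered out.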
__lowerCamelCase : Any = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
__lowerCamelCase : List[str] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
__lowerCamelCase : str = unet.to(torch.device('''cuda''', args.cuda_id))
__lowerCamelCase : int = pipeline.to(unet.device)
__lowerCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
__lowerCamelCase : Tuple = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 710 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __magic_name__=None )-> List[str]:
"""simple docstring"""
if subparsers is not None:
snake_case_ : List[str] = subparsers.add_parser("test" )
else:
snake_case_ : List[Any] = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" ,default=__magic_name__ ,help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) ,)
if subparsers is not None:
parser.set_defaults(func=__magic_name__ )
return parser
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
snake_case_ : str = script_name
else:
snake_case_ : Any = F'''--config_file={args.config_file} {script_name}'''
snake_case_ : Union[str, Any] = ["accelerate-launch"] + test_args.split()
snake_case_ : Optional[int] = execute_subprocess_async(__magic_name__ ,env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Dict = test_command_parser()
snake_case_ : Dict = parser.parse_args()
test_command(__magic_name__ )
if __name__ == "__main__":
main()
| 656 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : str = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''gpt_neo'''
a__ = ['''past_key_values''']
a__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :List[str] , lowerCAmelCase__ :Tuple=50_257 , lowerCAmelCase__ :Optional[Any]=2_048 , lowerCAmelCase__ :Tuple=2_048 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :List[str]=[[["global", "local"], 12]] , lowerCAmelCase__ :Optional[int]=16 , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Dict=256 , lowerCAmelCase__ :Union[str, Any]="gelu_new" , lowerCAmelCase__ :Dict=0.0 , lowerCAmelCase__ :Optional[Any]=0.0 , lowerCAmelCase__ :Union[str, Any]=0.0 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Any=1E-5 , lowerCAmelCase__ :Optional[Any]=0.0_2 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :int=50_256 , lowerCAmelCase__ :Tuple=50_256 , **lowerCAmelCase__ :Any , ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = vocab_size
snake_case_ : Tuple = max_position_embeddings
snake_case_ : Tuple = hidden_size
snake_case_ : int = num_layers
snake_case_ : List[str] = num_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : str = window_size
snake_case_ : List[Any] = activation_function
snake_case_ : List[Any] = resid_dropout
snake_case_ : Union[str, Any] = embed_dropout
snake_case_ : List[str] = attention_dropout
snake_case_ : List[Any] = classifier_dropout
snake_case_ : List[Any] = layer_norm_epsilon
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : Optional[int] = use_cache
snake_case_ : int = bos_token_id
snake_case_ : Dict = eos_token_id
snake_case_ : Any = attention_types
snake_case_ : Union[str, Any] = self.expand_attention_types_params(lowerCAmelCase__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@staticmethod
def _A ( lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Union[str, Any]:
"""simple docstring"""
import torch
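    # Tracing-friendly replacement for `torch.Tensor.unfold`: slice `input` along
    # `dimension` into overlapping windows of `size` elements taken every `step`
    # elements, moving the window-content axis to the end. E.g. a (2, 10) tensor
    # unfolded along dim 1 with size=4, step=2 yields 4 windows and shape (2, 4, 4).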
snake_case_ : Optional[Any] = input.size()
snake_case_ : Optional[int] = len(__magic_name__ )
snake_case_ : List[Any] = shape[dimension]
snake_case_ : List[Any] = torch.arange(0 ,__magic_name__ ,__magic_name__ )
snake_case_ : str = torch.div(sizedim - size ,__magic_name__ ,rounding_mode="floor" ) + 1
snake_case_ : Dict = torch.arange(__magic_name__ ) + low_indices[:min_length][:, None]
snake_case_ : List[str] = [slice(__magic_name__ )] * rank
snake_case_ : List[str] = indices
snake_case_ : Optional[Any] = input[s]
snake_case_ : List[str] = list(range(0 ,rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
import torch
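    # Find the largest divisor of `seq_length` that is strictly smaller than
    # `window_size`, plus the resulting number of blocks. E.g. seq_length=10,
    # window_size=4 gives a block length of 2 and 5 blocks.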
snake_case_ : List[Any] = torch.arange(1 ,__magic_name__ )
snake_case_ : List[str] = torch.remainder(__magic_name__ ,__magic_name__ )
snake_case_ : Optional[int] = remainders == 0
snake_case_ : Union[str, Any] = candidates[divisor_indices]
snake_case_ : Any = torch.max(__magic_name__ )
return largest_divisor, torch.div(__magic_name__ ,__magic_name__ ,rounding_mode="floor" )
class A_ (a_ ):
"""simple docstring"""
@property
def _A ( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
snake_case_ : str = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="inputs" )
snake_case_ : str = {0: "batch", 1: "past_sequence + sequence"}
else:
snake_case_ : Optional[Any] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
return self._config.num_heads
def _A ( self :Optional[int] , lowerCAmelCase__ :PreTrainedTokenizer , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = super(lowerCAmelCase__ , self ).generate_dummy_inputs(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
        # We need to order the inputs in the way they appear in the forward()
snake_case_ : Union[str, Any] = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case_ : int = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
snake_case_ : List[str] = seqlen + 2
snake_case_ : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
snake_case_ : Union[str, Any] = [
(torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) for _ in range(self.num_layers )
]
snake_case_ : List[str] = common_inputs["attention_mask"]
if self.use_past:
snake_case_ : Any = ordered_inputs["attention_mask"].dtype
snake_case_ : str = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__ )] , dim=1 )
return ordered_inputs
@property
def _A ( self :Any ) -> int:
'''simple docstring'''
return 13
| 711 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
__lowerCamelCase : str = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
__lowerCamelCase : int = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
__lowerCamelCase : List[str] = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=False ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 656 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__lowerCamelCase : Union[str, Any] = ['''bert-base-uncased''', '''bert-base-cased''']
__lowerCamelCase : Optional[int] = '''hf-internal-testing/tiny-bert-tf-only'''
if is_tf_available():
class A_ (tf.keras.Model ):
"""simple docstring"""
def __init__( self :int , lowerCAmelCase__ :Optional[int] ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : int = tokenizer
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = TFAutoModel.from_config(lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer(lowerCAmelCase__ )
snake_case_ : Tuple = self.bert(**lowerCAmelCase__ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : str = [
BertTokenizer.from_pretrained(lowerCAmelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
snake_case_ : Dict = [TFBertTokenizer.from_pretrained(lowerCAmelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(lowerCAmelCase__ , use_fast_bert_tokenizer=lowerCAmelCase__ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
snake_case_ : Tuple = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
snake_case_ : List[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
snake_case_ : List[Any] = tokenizer(lowerCAmelCase__ , return_tensors="tf" , padding="longest" )
snake_case_ : Dict = tf_tokenizer(lowerCAmelCase__ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
snake_case_ : Optional[Any] = tf_tokenizer(self.paired_sentences )
snake_case_ : Union[str, Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
snake_case_ : Any = tf.function(lowerCAmelCase__ )
for test_inputs in (self.test_sentences, self.paired_sentences):
snake_case_ : Optional[Any] = tf.constant(lowerCAmelCase__ )
snake_case_ : List[Any] = compiled_tokenizer(lowerCAmelCase__ )
snake_case_ : List[str] = tf_tokenizer(lowerCAmelCase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _A ( self :int ) -> Optional[int]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
snake_case_ : Optional[int] = ModelToSave(tokenizer=lowerCAmelCase__ )
snake_case_ : str = tf.convert_to_tensor(self.test_sentences )
snake_case_ : Dict = model(lowerCAmelCase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
snake_case_ : Optional[int] = Path(lowerCAmelCase__ ) / "saved.model"
model.save(lowerCAmelCase__ )
snake_case_ : Dict = tf.keras.models.load_model(lowerCAmelCase__ )
snake_case_ : str = loaded_model(lowerCAmelCase__ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
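# Serving-oriented sketch (assumption: `ModelToSave` and `TFBertTokenizer` are the
# classes defined/imported above; with the tokenizer inside the graph, the exported
# artifact accepts raw strings with no Python-side preprocessing):
#
#     tf_tokenizer = TFBertTokenizer.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
#     model = ModelToSave(tokenizer=tf_tokenizer)
#     model(tf.constant(["warm-up sentence"]))  # build the model once
#     model.save("exported.model")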
| 712 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCamelCase : str = 128022
__lowerCamelCase : List[Any] = 128028
@require_sentencepiece
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MaMaaaTokenizer
a__ = False
a__ = False
a__ = True
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Optional[int] = Path(self.tmpdirname )
save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = "</s>"
snake_case_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
snake_case_ : int = self.get_tokenizer()
snake_case_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , "This is a test" )
@slow
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
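        # fmt: off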
snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = '''facebook/m2m100_418M'''
a__ = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
a__ = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def _A ( cls :str ) -> int:
'''simple docstring'''
snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
snake_case_ : List[str] = 1
return cls
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = "en"
snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : int = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ )
@require_torch
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = "en"
snake_case_ : Tuple = "fr"
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Dict = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case_ : str = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case_ : int = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _A ( self :str ) -> int:
'''simple docstring'''
snake_case_ : Dict = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case_ : Tuple = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
| 656 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
__lowerCamelCase : Union[str, Any] = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''tapas'''
def __init__( self :Tuple , lowerCAmelCase__ :Union[str, Any]=30_522 , lowerCAmelCase__ :Tuple=768 , lowerCAmelCase__ :Optional[Any]=12 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :Union[str, Any]="gelu" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[Any]=[3, 256, 256, 2, 256, 256, 10] , lowerCAmelCase__ :Tuple=0.0_2 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Tuple=10.0 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :str=1.0 , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :List[Any]=1.0 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :Any=1.0 , lowerCAmelCase__ :List[Any]=1.0 , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :int=False , lowerCAmelCase__ :Optional[Any]="ratio" , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :int=64 , lowerCAmelCase__ :Union[str, Any]=32 , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :int=False , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :Optional[int]=None , **lowerCAmelCase__ :int , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
snake_case_ : Tuple = vocab_size
snake_case_ : List[Any] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Optional[int] = hidden_act
snake_case_ : Dict = intermediate_size
snake_case_ : List[Any] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_sizes
snake_case_ : Optional[int] = initializer_range
snake_case_ : int = layer_norm_eps
# Fine-tuning task hyperparameters
snake_case_ : Dict = positive_label_weight
snake_case_ : List[str] = num_aggregation_labels
snake_case_ : Union[str, Any] = aggregation_loss_weight
snake_case_ : Tuple = use_answer_as_supervision
snake_case_ : Optional[Any] = answer_loss_importance
snake_case_ : Dict = use_normalized_answer_loss
snake_case_ : Any = huber_loss_delta
snake_case_ : Optional[int] = temperature
snake_case_ : int = aggregation_temperature
snake_case_ : Union[str, Any] = use_gumbel_for_cells
snake_case_ : List[Any] = use_gumbel_for_aggregation
snake_case_ : Optional[Any] = average_approximation_function
snake_case_ : Optional[int] = cell_selection_preference
snake_case_ : Union[str, Any] = answer_loss_cutoff
snake_case_ : List[str] = max_num_rows
snake_case_ : List[Any] = max_num_columns
snake_case_ : str = average_logits_per_cell
snake_case_ : List[str] = select_one_column
snake_case_ : List[str] = allow_empty_column_selection
snake_case_ : Dict = init_cell_selection_weights_to_zero
snake_case_ : int = reset_position_index_per_cell
snake_case_ : Optional[Any] = disable_per_token_loss
# Aggregation hyperparameters
snake_case_ : List[str] = aggregation_labels
snake_case_ : List[Any] = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            snake_case_ : Tuple = {int(k): v for k, v in aggregation_labels.items()}
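# Minimal usage sketch (assumption: the constructor accepts keyword arguments that
# mirror the attribute names assigned in __init__ above, as in the upstream TAPAS
# config; the values are illustrative only):
#
#     config = A_(num_aggregation_labels=4, answer_loss_cutoff=0.664)
#     print(config.num_aggregation_labels)  # -> 4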
| 713 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo;
# see the argparse section at the bottom of this file for the expected command-line flags.
__lowerCamelCase : str = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowerCamelCase : Tuple = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Tuple = SavedModel()
snake_case_ : Dict = []
with open(os.path.join(__magic_name__ ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f:
snake_case_ : Dict = json.load(__magic_name__ )["opsets"]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__magic_name__ )] )
with open(__magic_name__ ,"rb" ) as f:
saved_model.ParseFromString(f.read() )
snake_case_ : Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
    # Convert the set of op names to a sorted list for deterministic reporting
snake_case_ : str = sorted(__magic_name__ )
snake_case_ : Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__magic_name__ )
if strict and len(__magic_name__ ) > 0:
        raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops ) )
elif len(__magic_name__ ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*__magic_name__ ,sep="\n" )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
__lowerCamelCase : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
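# Example invocation (assumption: this script is saved as check_ops.py, a hypothetical
# name, and ./model/saved_model.pb is an existing TensorFlow SavedModel protobuf):
#
#     python check_ops.py --saved_model_path ./model/saved_model.pb --opset 12 --strict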
| 656 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__lowerCamelCase : Optional[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
__lowerCamelCase : str = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
__lowerCamelCase : Dict = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each prediction should be a class label (an int), or a float score for the ``stsb`` subset.
    references: list of ground-truth labels, one per prediction.
        Each reference should be a class label (an int), or a float score for the ``stsb`` subset.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = simple_accuracy(__magic_name__ ,__magic_name__ )
snake_case_ : Union[str, Any] = float(fa_score(y_true=__magic_name__ ,y_pred=__magic_name__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[int]:
"""simple docstring"""
snake_case_ : List[str] = float(pearsonr(__magic_name__ ,__magic_name__ )[0] )
snake_case_ : Tuple = float(spearmanr(__magic_name__ ,__magic_name__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
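# Quick sanity check for the helper above (perfectly linear, monotone inputs, so both
# correlations should come out as 1.0 up to float rounding; `pearson_and_spearman` is
# the call-site name used for this helper further below):
#
#     >>> pearson_and_spearman([0.0, 1.0, 2.0], [0.0, 2.0, 4.0])
#     {'pearson': 1.0, 'spearmanr': 1.0}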
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] ) -> Dict:
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(lowerCAmelCase__ , lowerCAmelCase__ )}
elif self.config_name == "stsb":
return pearson_and_spearman(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
| 714 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
__lowerCamelCase : List[str] = ['''names''', '''prefix''']
__lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines''']
__lowerCamelCase : Optional[Any] = ['''date_format''']
@dataclass
class A_ (datasets.BuilderConfig ):
"""simple docstring"""
a__ = ","
a__ = None
a__ = "infer"
a__ = None
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = None
a__ = None
a__ = None
a__ = None
a__ = False
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = False
a__ = True
a__ = None
a__ = "."
a__ = None
a__ = '"'
a__ = 0
a__ = None
a__ = None
a__ = None
a__ = None
a__ = True
a__ = True
a__ = 0
a__ = True
a__ = False
a__ = None
a__ = 10000
a__ = None
a__ = "strict"
a__ = "error"
a__ = None
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
if self.delimiter is not None:
snake_case_ : Tuple = self.delimiter
if self.column_names is not None:
snake_case_ : List[Any] = self.column_names
@property
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
a__ = CsvConfig
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
snake_case_ : int = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : List[str] = [files]
snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
snake_case_ : str = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : str = [files]
snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
snake_case_ : int = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
snake_case_ : str = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' )
raise
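# Usage sketch (assumption: "data.csv" is a hypothetical local file; "csv" is the
# builder name this module provides in the upstream `datasets` library, and extra
# keyword arguments are forwarded to the CsvConfig fields defined above):
#
#     from datasets import load_dataset
#     dataset = load_dataset("csv", data_files="data.csv", delimiter=";")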
| 656 | 0 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = False
snake_case_ : int = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
snake_case_ : Any = True
elif "IPython" in sys.modules:
snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
snake_case_ : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__magic_name__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__magic_name__ ,master_addr="127.0.01" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__magic_name__ ,master_addr="127.0.01" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ )
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
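# Usage sketch (assumption: `training_loop` is a hypothetical user-defined function and
# `notebook_launcher` is the public name of the first launcher above, matching the
# upstream accelerate API):
#
#     def training_loop(learning_rate):
#         ...  # build the Accelerator and the model, then run the training here
#
#     notebook_launcher(training_loop, args=(1e-3,), num_processes=2)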
| 715 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MgpstrTokenizer
a__ = False
a__ = {}
a__ = False
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = "tester"
snake_case_ : Tuple = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _A ( self :Dict ) -> str:
'''simple docstring'''
pass
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _A ( self :int ) -> Dict:
'''simple docstring'''
pass
| 656 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
@property
def _A ( self :List[Any] ) -> str:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _A ( self :int ) -> int:
'''simple docstring'''
snake_case_ : str = ort.SessionOptions()
snake_case_ : List[str] = False
return options
def _A ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
snake_case_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
snake_case_ : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : int = "A red cat sitting on a park bench"
snake_case_ : Optional[Any] = np.random.RandomState(0 )
snake_case_ : Dict = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Optional[Any] = output.images
snake_case_ : Any = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
snake_case_ : Dict = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
snake_case_ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
snake_case_ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
snake_case_ : Dict = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = "A red cat sitting on a park bench"
snake_case_ : int = np.random.RandomState(0 )
snake_case_ : int = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : List[Any] = output.images
snake_case_ : Dict = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
snake_case_ : Dict = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 716 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__magic_name__ ,__magic_name__ ) ) )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list[list[list[float] | float]]:
"""simple docstring"""
if dataset.ndim != value_array.ndim:
snake_case_ : int = (
"Wrong input data's dimensions... "
F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(__magic_name__ )
try:
if dataset.shape[1] != value_array.shape[1]:
snake_case_ : Dict = (
"Wrong input data's shape... "
F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(__magic_name__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
snake_case_ : Dict = (
"Input data have different datatype... "
F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(__magic_name__ )
snake_case_ : Optional[int] = []
for value in value_array:
snake_case_ : List[str] = euclidean(__magic_name__ ,dataset[0] )
snake_case_ : int = dataset[0].tolist()
for dataset_value in dataset[1:]:
snake_case_ : Optional[Any] = euclidean(__magic_name__ ,__magic_name__ )
if dist > temp_dist:
snake_case_ : Tuple = temp_dist
snake_case_ : Optional[int] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
return np.dot(__magic_name__ ,__magic_name__ ) / (norm(__magic_name__ ) * norm(__magic_name__ ))
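# Illustrative calls (assumption: `similarity_search` and `cosine_similarity` are the
# public names of the two helpers above; the values are easy to verify by hand):
#
#     >>> dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#     >>> value_array = np.array([[0.0, 1.0]])
#     >>> similarity_search(dataset, value_array)
#     [[[0.0, 0.0], 1.0]]
#
#     >>> cosine_similarity(np.array([1.0, 1.0]), np.array([1.0, 1.0]))  # -> ~1.0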
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656 | 0 |
'''simple docstring'''
import socket
def __UpperCAmelCase ( )-> Optional[int]:
"""simple docstring"""
snake_case_ : Dict = socket.socket(socket.AF_INET ,socket.SOCK_STREAM )
snake_case_ : Union[str, Any] = socket.gethostname()
snake_case_ : Union[str, Any] = 1_2312
sock.connect((host, port) )
sock.send(B"Hello server!" )
with open("Received_file" ,"wb" ) as out_file:
print("File opened" )
print("Receiving data..." )
while True:
snake_case_ : List[Any] = sock.recv(1024 )
if not data:
break
out_file.write(__magic_name__ )
print("Successfully received the file" )
sock.close()
print("Connection closed" )
if __name__ == "__main__":
main()
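# A minimal counterpart server sketch (assumption: it runs on the same host and sends
# "Sample_file", a hypothetical filename, to the first client that connects):
#
#     import socket
#
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind((socket.gethostname(), 12312))
#     server.listen(1)
#     conn, _ = server.accept()
#     print(conn.recv(1024))  # the client's greeting
#     with open("Sample_file", "rb") as in_file:
#         conn.sendfile(in_file)
#     conn.close()
#     server.close()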
| 717 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=None ,**__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : int = [x.strip() for x in open(__magic_name__ ).readlines()]
snake_case_ : Optional[int] = [x.strip() for x in open(__magic_name__ ).readlines()][: len(__magic_name__ )]
snake_case_ : List[Any] = calculate_rouge(__magic_name__ ,__magic_name__ ,**__magic_name__ )
if save_path is not None:
save_json(__magic_name__ ,__magic_name__ ,indent=__magic_name__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
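# Example invocation (assumption: this file is saved as rouge_cli.py, a hypothetical
# name, and each text file holds one prediction or reference per line):
#
#     python rouge_cli.py predictions.txt references.txt --save_path metrics.json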
| 656 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase : List[str] = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = ['''DeiTFeatureExtractor''']
__lowerCamelCase : Optional[Any] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
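# Usage sketch: the lazy module defers each submodule import until first attribute
# access, so the import below stays cheap until the classes are actually used
# (assumption: the package is installed with its torch extras available):
#
#     from transformers import DeiTConfig, DeiTModel
#     model = DeiTModel(DeiTConfig())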
| 718 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase : Optional[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict ,old ,new ):
    """simple docstring"""
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict ,is_panoptic=False ):
    """simple docstring"""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        # the fused projection has 3 * 256 rows: the first 256 are the query, the middle 256 the key, the last 256 the value
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name ,pytorch_dump_folder_path ):
    """simple docstring"""
    # load default config and adapt it to the checkpoint name
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="dataset" ) ,"r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img ,return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    logger.info(F'''Converting model {model_name}...''' )
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR" ,model_name ,pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict ,src ,dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict ,is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                val = state_dict.pop(key )
                state_dict["conditional_detr.model" + key[len("conditional_detr" ):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name ,organization="DepuMeng" ,commit_message="Add model" )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 )
    # Save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__lowerCamelCase : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 656 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class A_ (a_ ):
"""simple docstring"""
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
snake_case_ : int = tempfile.mkdtemp()
snake_case_ : Union[str, Any] = 8
# DPR tok
snake_case_ : str = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
snake_case_ : List[Any] = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
snake_case_ : Tuple = os.path.join(lowerCAmelCase__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
snake_case_ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case_ : str = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case_ : Tuple = {"unk_token": "<unk>"}
snake_case_ : str = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
snake_case_ : Any = os.path.join(lowerCAmelCase__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : List[Any] = os.path.join(lowerCAmelCase__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _A ( self :Union[str, Any] ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def _A ( self :List[str] ) -> DPRContextEncoderTokenizer:
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def _A ( self :Union[str, Any] ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def _A ( self :int ) -> Dict:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
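        # two toy passages whose constant embeddings make retrieval deterministic under inner-product search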
snake_case_ : Dict = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _A ( self :int ) -> int:
'''simple docstring'''
snake_case_ : str = self.get_dummy_dataset()
snake_case_ : str = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
snake_case_ : List[Any] = dataset
snake_case_ : Optional[Any] = RagRetriever(
lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _A ( self :Tuple , lowerCAmelCase__ :bool ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = self.get_dummy_dataset()
snake_case_ : str = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
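            # save the dataset and its FAISS index to disk so the retriever can be initialized from file paths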
snake_case_ : Any = os.path.join(self.tmpdirname , "dataset" )
snake_case_ : Any = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
snake_case_ : int = RagRetriever(
lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
snake_case_ : str = RagRetriever(
lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowerCAmelCase__ ) , )
return retriever
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
snake_case_ : str = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
snake_case_ : List[str] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
snake_case_ : int = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(lowerCAmelCase__ , open(lowerCAmelCase__ , "wb" ) )
snake_case_ : str = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
snake_case_ : Union[str, Any] = RagRetriever(
lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = 1
snake_case_ : List[str] = self.get_dummy_canonical_hf_index_retriever()
snake_case_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : Union[str, Any] = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , lowerCAmelCase__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
snake_case_ : Optional[int] = self.get_dummy_dataset()
retriever.save_pretrained(lowerCAmelCase__ )
snake_case_ : Optional[Any] = RagRetriever.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : str = retriever.retrieve(lowerCAmelCase__ , n_docs=1 )
self.assertTrue(out is not None )
def _A ( self :int ) -> Any:
'''simple docstring'''
snake_case_ : str = 1
snake_case_ : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
snake_case_ : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : Dict = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , lowerCAmelCase__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = RagRetriever.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : Any = retriever.retrieve(lowerCAmelCase__ , n_docs=1 )
self.assertTrue(out is not None )
def _A ( self :str ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = 1
snake_case_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
snake_case_ : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : int = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , lowerCAmelCase__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase__ )
snake_case_ : Dict = RagRetriever.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : int = retriever.retrieve(lowerCAmelCase__ , n_docs=1 )
self.assertTrue(out is not None )
def _A ( self :int ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = 1
snake_case_ : Dict = self.get_dummy_legacy_index_retriever()
snake_case_ : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : Optional[Any] = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , lowerCAmelCase__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _A ( self :List[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase__ )
snake_case_ : Optional[Any] = RagRetriever.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : List[Any] = retriever.retrieve(lowerCAmelCase__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _A ( self :List[str] ) -> Dict:
'''simple docstring'''
import torch
snake_case_ : Optional[Any] = 1
snake_case_ : Dict = self.get_dummy_canonical_hf_index_retriever()
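        # two toy questions as token ids; the all-ones / minus-ones rows below serve as their query embeddings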
snake_case_ : str = [[5, 7], [10, 11]]
snake_case_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : List[Any] = retriever(lowerCAmelCase__ , lowerCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
snake_case_ : Any = retriever(
lowerCAmelCase__ , lowerCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase__ , return_tensors="pt" , )
snake_case_ : Union[str, Any] = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : str = self.get_dpr_ctx_encoder_tokenizer()
snake_case_ : int = 1
snake_case_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
retriever.set_ctx_encoder_tokenizer(lowerCAmelCase__ )
snake_case_ : Optional[Any] = [[5, 7], [10, 11]]
snake_case_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : str = retriever(lowerCAmelCase__ , lowerCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase__ )
self.assertEqual(
len(lowerCAmelCase__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , lowerCAmelCase__ ) # check for doc token related keys in dictionary.
| 719 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = 1
snake_case_ : Dict = 3
snake_case_ : Union[str, Any] = (32, 32)
snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _A ( self :Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = torch.ones([0] )
def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : str = self.dummy_cond_unet
snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : Dict = self.dummy_vae
snake_case_ : Dict = self.dummy_text_encoder
snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : str = 77
snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
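        # rescale so the dummy values stay within the [0, 1] range expected for an input image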
snake_case_ : Tuple = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = "A painting of a squirrel eating a burger"
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Dict = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
snake_case_ : Any = output.images
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Optional[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.dummy_cond_unet
snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : int = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : int = 77
snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ : Optional[Any] = unet.half()
snake_case_ : Tuple = vae.half()
snake_case_ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : str = init_image.resize((760, 504) )
snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
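        # attention slicing lowers peak GPU memory during inference at a small speed cost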
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images[0]
snake_case_ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : List[Any] = init_image.resize((768, 512) )
snake_case_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
snake_case_ : Any = "BAAI/AltDiffusion"
snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 656 | 0 |
'''simple docstring'''
def gcd(a ,b )-> int:
    """simple docstring"""
    while a != 0:
        a , b = b % a, a
    return b
def find_mod_inverse(a ,m )-> int:
    """simple docstring"""
    if gcd(a ,m ) != 1:
        msg = F'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg )
    u1 , u2 , u3 = 1, 0, a
    v1 , v2 , v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
return ua % m
| 720 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = ZeroShotClassificationPipeline(
model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["polics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# No kwarg
snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
snake_case_ : str = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# https://github.com/huggingface/transformers/issues/13846
snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(1 )
] , )
snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(2 )
] , )
with self.assertRaises(lowerCAmelCase__ ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier(lowerCAmelCase__ , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , )
        self.run_entailment_id(classifier )
    def run_entailment_id( self , zero_shot_classifier :Pipeline ):
        '''simple docstring'''
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        # generic LABEL_* names carry no entailment information, so the pipeline falls back to -1 (the last label)
        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
@require_torch
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
snake_case_ : int = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
snake_case_ : Optional[int] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
snake_case_ : str = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Optional[int] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
snake_case_ : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
snake_case_ : Tuple = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
| 656 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = '''xlnet'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''', # Backward compatibility
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__(
        self ,
        vocab_size=32_000 ,
        d_model=1_024 ,
        n_layer=24 ,
        n_head=16 ,
        d_inner=4_096 ,
        ff_activation="gelu" ,
        untie_r=True ,
        attn_type="bi" ,
        initializer_range=0.0_2 ,
        layer_norm_eps=1E-1_2 ,
        dropout=0.1 ,
        mem_len=512 ,
        reuse_len=None ,
        use_mems_eval=True ,
        use_mems_train=False ,
        bi_data=False ,
        clamp_len=-1 ,
        same_length=False ,
        summary_type="last" ,
        summary_use_proj=True ,
        summary_activation="tanh" ,
        summary_last_dropout=0.1 ,
        start_n_top=5 ,
        end_n_top=5 ,
        pad_token_id=5 ,
        bos_token_id=1 ,
        eos_token_id=2 ,
        **kwargs ,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead." , FutureWarning , )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self ):
'''simple docstring'''
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , value ):
'''simple docstring'''
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
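# usage sketch: XLNetConfig(vocab_size=32_000, d_model=1_024, n_head=16) is valid, while
# XLNetConfig(d_model=1_000, n_head=16) raises ValueError because 1_000 % 16 == 8, not 0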
| 721 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
roberta.eval() # disable dropout
snake_case_ : Dict = roberta.model.encoder.sentence_encoder
    snake_case_ : List[str] = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,
        hidden_size=roberta.cfg.model.encoder_embed_dim ,
        num_hidden_layers=roberta.cfg.model.encoder_layers ,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads ,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,
        max_position_embeddings=514 ,
        type_vocab_size=1 ,
        layer_norm_eps=1E-5 ,
    )
if classification_head:
snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" ,__magic_name__ )
snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
snake_case_ : int = roberta_sent_encoder.embed_positions.weight
snake_case_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
snake_case_ : str = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case_ : BertLayer = model.roberta.encoder.layer[i]
snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
snake_case_ : RobertaAttention = layer.attention
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight
snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight
snake_case_ : Any = roberta_layer.self_attn.q_proj.bias
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight
snake_case_ : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight
snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case_ : int = roberta_layer.final_layer_norm.weight
snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias
    # intermediate
    snake_case_ : BertIntermediate = layer.intermediate
    assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
    snake_case_ : List[str] = roberta_layer.fc1.weight
    snake_case_ : List[Any] = roberta_layer.fc1.bias
    # output
    snake_case_ : BertOutput = layer.output
    assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
    snake_case_ : Any = roberta_layer.fc2.weight
    snake_case_ : Any = roberta_layer.fc2.bias
# end of layer
if classification_head:
snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight
snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
snake_case_ : int = roberta.model.encoder.lm_head.dense.bias
snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias
snake_case_ : int = roberta.model.encoder.lm_head.weight
snake_case_ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 )  # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape ,their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    success = torch.allclose(our_output ,their_output ,atol=1E-3 )
print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
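# A hypothetical invocation sketch (the script name and both paths below are
# placeholders, not verified against a real checkpoint):
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/checkpoint \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-converted \
#       --classification_head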
| 656 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class snake_case_ (Dataset ):
    def __init__( self :Tuple ,params :List[Any] ,data :Any ) -> Tuple:
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
    def __getitem__( self :str ,index :Union[str, Any] ) -> int:
        return (self.token_ids[index], self.lengths[index])
def __len__( self :Union[str, Any] ) -> List[Any]:
return len(self.lengths )
    def check( self :Union[str, Any] ) -> str:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences( self :int ) -> Optional[int]:
        max_len = self.params.max_model_input_size
        too_long = self.lengths > max_len
        logger.info(F'Splitting {sum(too_long )} too long sequences.' )
        def divide_chunks(l ,n ):
            return [l[i : i + n] for i in range(0 ,len(l ) ,n )]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id , sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id , sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
        for seq_, len_ in zip(self.token_ids ,self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ ,max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s ,0 ,cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s ,len(sub_s ) ,sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences( self :List[Any] ) -> Dict:
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
    def remove_unknown_sequences( self :List[Any] ) -> Tuple:
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self )
        unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
    def print_statistics( self :List[str] ) -> List[Any]:
if not self.params.is_master:
return
logger.info(F'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences( self :int ,batch :Optional[int] ) -> int:
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
return tk_t, lg_t
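# Usage sketch (assuming the class keeps its upstream name LmSeqsDataset from
# the distillation example; `params` is a hypothetical argparse namespace with
# the attributes read above):
#
#   dataset = LmSeqsDataset(params=params, data=token_id_arrays)
#   loader = torch.utils.data.DataLoader(
#       dataset, batch_size=32, shuffle=True, collate_fn=dataset.batch_sequences)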
| 657 |
def hexagonal_numbers ( length : int ):
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('Length must be a positive integer.' )
    return [n * (2 * n - 1) for n in range(length )]
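# The n-th hexagonal number is h(n) = n * (2n - 1); the helper returns the
# first `length` of them starting from h(0) = 0, e.g. [0, 1, 6, 15, 28].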
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 | 1 |
import math
def fx ( x : float , a : float ):
    return math.pow(x , 2 ) - a
def fx_derivative ( x : float ):
    return 2 * x
def get_initial_point ( a : float ):
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def square_root_iterative ( a : float , max_iter : int = 9_9_9_9 , tolerance : float = 0.00_000_000_000_001 ):
    if a < 0:
        raise ValueError('math domain error' )
    # Newton's method: x_{n+1} = x_n - f(x_n) / f'(x_n) with f(x) = x^2 - a
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
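# Minimal sanity checks for the iteration above (not part of the original module):
if __name__ == "__main__":
    assert abs(square_root_iterative(4.0 ) - 2.0 ) < 1E-9
    assert abs(square_root_iterative(2.0 ) - math.sqrt(2.0 ) ) < 1E-9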
| 657 |
def calc_profit ( profit : list , weight : list , max_weight : int ):
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(profit_by_weight )
    i = 0
    limit = 0
    gain = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Adding profit gained for the whole item: 1 == weight[index] / weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
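# Worked example: with profit=[60, 100, 120], weight=[10, 20, 30] and
# max_weight=50, the greedy pass takes the two best profit/weight items whole
# and a 20/30 fraction of the last one: 60 + 100 + (20 / 30) * 120 = 240.0.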
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class snake_case_ (unittest.TestCase ):
@require_torch
    def test_small_model_pt( self :List[str] ) -> Optional[Any]:
        audio_classifier = pipeline(
            task='zero-shot-audio-classification' ,model='hf-internal-testing/tiny-clap-htsat-unfused' )
        dataset = load_dataset('ashraq/esc50' )
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio ,candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) ,[{'score': 0.5_01, 'label': 'Sound of a dog'}, {'score': 0.4_99, 'label': 'Sound of vaccum cleaner'}] ,)
@unittest.skip('No models are available in TF' )
def lowerCamelCase__( self :Union[str, Any] ) -> List[str]:
pass
@slow
@require_torch
    def test_large_model_pt( self :Any ) -> Optional[Any]:
        audio_classifier = pipeline(
            task='zero-shot-audio-classification' ,model='laion/clap-htsat-unfused' ,)
        # This is an audio of a dog
        dataset = load_dataset('ashraq/esc50' )
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio ,candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) ,[
                {'score': 0.9_99, 'label': 'Sound of a dog'},
                {'score': 0.0_01, 'label': 'Sound of vaccum cleaner'},
            ] ,)
        output = audio_classifier([audio] * 5 ,candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) ,[
                [
                    {'score': 0.9_99, 'label': 'Sound of a dog'},
                    {'score': 0.0_01, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5 ,)
        output = audio_classifier(
            [audio] * 5 ,candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] ,batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) ,[
                [
                    {'score': 0.9_99, 'label': 'Sound of a dog'},
                    {'score': 0.0_01, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5 ,)
@unittest.skip('No models are available in TF' )
def lowerCamelCase__( self :List[str] ) -> Optional[int]:
pass
| 657 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
import os
from distutils.util import strtobool
def __lowercase ( env_keys , default ):
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def __lowercase ( key , default=False ):
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def __lowercase ( key , default="no" ):
    value = os.environ.get(key , str(default ) )
    return value
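# Usage sketch (the helper names are obfuscated above; upstream in accelerate
# they are get_int_from_env / parse_flag_from_env / parse_choice_from_env):
#
#   os.environ["WORLD_SIZE"] = "4"
#   get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], default=1)  # -> 4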
| 657 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': CTRLModel,
            '''text-classification''': CTRLForSequenceClassification,
            '''text-generation''': CTRLLMHeadModel,
            '''zero-shot''': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp( self :int ) -> List[str]:
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=CTRLConfig ,n_embd=37 )
    def tearDown( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
    def tearDown( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
        model = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=torch_device )  # Legal the president is
        expected_output_ids = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids ,do_sample=False )
        self.assertListEqual(output_ids[0].tolist() ,expected_output_ids )
| 657 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
snake_case : List[Any] = get_logger()
snake_case : Optional[dict] = None
class snake_case_ (TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
    def __init__( self :Tuple ,features=None ,device=None ,**jnp_array_kwargs :Union[str, Any] ) -> Tuple:
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device ,Device ):
            raise ValueError(
                F'Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` '
                'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
                'the device with `str()` to get its string identifier that will be internally mapped '
                'to the actual `jaxlib.xla_extension.Device`.' )
        self.device = device if isinstance(device ,str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                F'Device with string identifier {self.device} not listed among the available '
                F'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
                F'device: {str(jax.devices()[0] )}.' )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
    def _map_devices_to_str( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self :List[Any] ,column :List[str] ) -> List[Any]:
        import jax
        import jax.numpy as jnp
        if isinstance(column ,list ) and column:
            if all(
                isinstance(x ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column ,axis=0 )
        return column
    def _tensorize( self :str ,value :Any ) -> List[str]:
        import jax
        import jax.numpy as jnp
        if isinstance(value ,(str, bytes, type(None )) ):
            return value
        elif isinstance(value ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'dtype': jnp.int64}
            else:
                default_dtype = {'dtype': jnp.int32}
        elif isinstance(value ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
            default_dtype = {'dtype': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value ,PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value ,**{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize( self :List[Any] ,data_struct :Tuple ) -> Any:
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct ,torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct ,'__array__' ) and not isinstance(data_struct ,jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct ,np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct ,(list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self :Union[str, Any] ,data_struct :dict ) -> List[Any]:
        return map_nested(self._recursive_tensorize ,data_struct ,map_list=False )
    def format_row( self :Optional[int] ,pa_table :pa.Table ) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self :int ,pa_table :pa.Table ) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column ,pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self :Union[str, Any] ,pa_table :pa.Table ) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
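# Usage sketch (assuming this is the "jax" formatter registered by `datasets`,
# as upstream):
#
#   ds = ds.with_format("jax", device=str(jax.devices()[0]))
#   batch = ds[:2]  # column values come back as jax.Array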
| 657 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (SchedulerMixin , ConfigMixin ):
    order = 1
@register_to_config
    def __init__( self :Optional[int] ,num_train_timesteps :int = 10_00 ,trained_betas :Optional[Union[np.ndarray, List[float]]] = None ) -> int:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self :Union[str, Any] ,num_inference_steps :int ,device :Union[str, torch.device] = None ) -> Union[str, Any]:
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas ,dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas ,self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self :Any ,model_output :torch.FloatTensor ,timestep :int ,sample :torch.FloatTensor ,return_dict :bool = True ,) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        # Linear multistep (Adams-Bashforth-style) combination of the stored
        # network outputs, orders 1 to 4
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample ,timestep_index ,prev_timestep_index ,ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def scale_model_input( self :Union[str, Any] ,sample :torch.FloatTensor ,*args :int ,**kwargs :Optional[int] ) -> torch.FloatTensor:
        return sample
    def _get_prev_sample( self :Optional[Any] ,sample :List[Any] ,timestep_index :Optional[int] ,prev_timestep_index :Dict ,ets :Any ) -> Optional[Any]:
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha ,1E-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
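# Usage sketch (assuming the class keeps its upstream IPNDMScheduler name; the
# denoising model `unet` below is hypothetical):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample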
| 657 | 1 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_ (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp( self :Dict ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file ,'w' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_input_output_texts( self :Tuple ,tokenizer :Optional[Any] ) -> List[str]:
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self :Tuple ) -> int:
        tokenizer = XLMTokenizer(self.vocab_file ,self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
@slow
    def test_sequence_builders( self :int ) -> List[Any]:
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
        text = tokenizer.encode('sequence builders' ,add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' ,add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 657 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_mobilevit'''] = ['''MobileViTFeatureExtractor''']
    _import_structure['''image_processing_mobilevit'''] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilevit'''] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mobilevit'''] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
def rank_of_matrix ( matrix : list[list[int | float]] ):
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row] , matrix[i] = matrix[i] , matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
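# Example: the rows of [[1, 2], [2, 4]] are linearly dependent, so
# rank_of_matrix([[1, 2], [2, 4]]) evaluates to 1, while an n x n identity
# matrix has full rank n.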
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast ( tokenizer_name , checkpoint_name , dump_path , force_download ):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
    logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F'=> File names {file_names}' )
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
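# Example invocation sketch (the output path is a placeholder):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased \
#       --dump_path ./fast-tokenizers --force_download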
| 657 | 1 |
from PIL import Image
def change_brightness ( img : Image , level : float ):
    def brightness(c : int ) -> float:
        return 1_2_8 + level + (c - 1_2_8)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
snake_case : Optional[Any] = change_brightness(img, 1_00)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 657 |
from math import ceil, sqrt
def solution ( limit : int = 1_0_0_0_0_0_0 ):
    answer = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
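# The count covers square laminae (a square outline with a centred square
# hole): tiles used = outer_width**2 - hole_width**2, where both widths share
# the same parity. This matches Project Euler problem 173 for the default
# limit of one million tiles.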
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : int = logging.get_logger(__name__)
snake_case : Dict = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class snake_case_ (PretrainedConfig ):
    model_type = '''encodec'''
    def __init__( self :str ,target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] ,sampling_rate=2_40_00 ,audio_channels=1 ,normalize=False ,chunk_length_s=None ,overlap=None ,hidden_size=1_28 ,num_filters=32 ,num_residual_layers=1 ,upsampling_ratios=[8, 5, 4, 2] ,norm_type="weight_norm" ,kernel_size=7 ,last_kernel_size=7 ,residual_kernel_size=3 ,dilation_growth_rate=2 ,use_causal_conv=True ,pad_mode="reflect" ,compress=2 ,num_lstm_layers=2 ,trim_right_ratio=1.0 ,codebook_size=10_24 ,codebook_dim=None ,use_conv_shortcut=True ,**kwargs ) -> Dict:
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
        super().__init__(**kwargs )
@property
    def chunk_length( self :Dict ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride( self :str ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
@property
    def frame_rate( self :str ) -> int:
        hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
    def num_quantizers( self :str ) -> int:
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
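# Usage sketch:
#
#   config = EncodecConfig()  # defaults: 24 kHz, upsampling_ratios [8, 5, 4, 2]
#   config.frame_rate  # ceil(24_000 / (8 * 5 * 4 * 2)) = 75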
| 657 |
from sklearn.metrics import f1_score
import datasets
snake_case : Optional[int] = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
snake_case : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
snake_case : Union[str, Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
    def _info( self :Any ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
    def _compute( self :Dict ,predictions :str ,references :str ,labels :Dict=None ,pos_label :str=1 ,average :Optional[int]="binary" ,sample_weight :Union[str, Any]=None ) -> Tuple:
        score = f1_score(
            references ,predictions ,labels=labels ,pos_label=pos_label ,average=average ,sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 657 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env ( key , default=False ):
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'If set, {key} must be yes or no.' )
    return _value
snake_case : Dict = parse_flag_from_env('''RUN_SLOW''', default=False)
snake_case : str = parse_flag_from_env('''RUN_REMOTE''', default=False)
snake_case : int = parse_flag_from_env('''RUN_LOCAL''', default=True)
snake_case : Dict = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
snake_case : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
snake_case : Union[str, Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
snake_case : List[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
snake_case : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
snake_case : List[str] = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
snake_case : Dict = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
snake_case : Tuple = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def __lowercase ( test_case ):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('test requires faiss' )(test_case )
    return test_case
def __lowercase ( test_case ):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex' )(test_case )
    return test_case
def __lowercase ( test_case ):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch' )(test_case )
    return test_case
def __lowercase ( test_case ):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy' )(test_case )
    return test_case
def __lowercase ( test_case ):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch' )(test_case )
    return test_case
def __lowercase ( test_case ):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow' )(test_case )
    return test_case
def __lowercase ( test_case ):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX' )(test_case )
    return test_case
def __lowercase ( test_case ):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow' )(test_case )
    return test_case
def __lowercase ( __lowerCAmelCase : Union[str, Any] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__lowerCAmelCase )
else:
return test_case
def __lowercase ( __lowerCAmelCase : List[Any] ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__lowerCAmelCase )
else:
return test_case
def __lowercase ( __lowerCAmelCase : Dict ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__lowerCAmelCase )
else:
return test_case
def __lowercase ( model ):
    def _require_spacy_model(test_case ):
        try:
            import spacy  # noqa F401
            spacy.load(model )
        except ImportError:
            return unittest.skip('test requires spacy' )(test_case )
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model ) )(test_case )
        else:
            return test_case
    return _require_spacy_model
def __lowercase ( __lowerCAmelCase : Dict ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__lowerCAmelCase )
else:
return test_case
def __lowercase ( __lowerCAmelCase : Tuple ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__lowerCAmelCase )
else:
return test_case
def __lowercase ( test_case ):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow' )(test_case )
    return test_case
def __lowercase ( test_case ):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local' )(test_case )
    return test_case
def __lowercase ( test_case ):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged' )(test_case )
    return test_case
def __lowercase ( test_case ):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote' )(test_case )
    return test_case
def __lowercase ( *decorators ):
    def decorate(cls : Tuple ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith('test' ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls , name , fn )
        return cls
    return decorate
class RequestWouldHangIndefinitelyError(Exception ):
    pass
class OfflineSimulationMode(Enum ):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def __lowercase ( __lowerCAmelCase : Optional[int]=OfflineSimulationMode.CONNECTION_FAILS , __lowerCAmelCase : str=1E-1_6 ):
    online_request = requests.Session().request
    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout' ) is None:
            raise RequestWouldHangIndefinitelyError(
                F'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
        kwargs['timeout'] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1' , F'OfflineMock[{url}]' ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError('Offline mode is enabled.' , request=prepared_request )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __lowerCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __lowerCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __lowerCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
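# Usage sketch (assuming the context manager keeps its upstream name `offline`):
#
#   with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=1e-3):
#       ...  # any HTTP call in here fails fast instead of hanging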
@contextmanager
def __lowercase ( *args , **kwargs ):
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def __lowercase ( ):
import gc
gc.collect()
a__ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __lowercase ( ):
import gc
gc.collect()
a__ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __lowercase ( rng1 , rng2 ):
    return deepcopy(rng1 ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(rng2 ).integers(0 , 1_0_0 , 1_0 ).tolist()
def __lowercase ( func ):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func , *args , **kwargs ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith('500' ) or str(err ).startswith('502' ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper , func )
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    # Returns the numerical id of a `pytest-xdist` worker (0 when xdist isn't used).
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    # Returns a master port offset by the worker id so concurrent workers don't collide.
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
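# Illustrative usage (added; not part of the original module): the two helpers
# above are typically combined to launch a `torch.distributed` test script so
# that concurrent pytest-xdist workers don't use the same master port at once.
#
#   cmd = [
#       sys.executable,
#       "-m",
#       "torch.distributed.launch",
#       f"--master_port={get_torch_dist_unique_port()}",
#       "tests/distributed_script.py",  # hypothetical script path
#   ]
#   execute_subprocess_async(cmd, env=os.environ.copy())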
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
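# Note: `label_smoothed_nll_loss` is dynamically imported from a local `utils`
# module that is not part of this file. A minimal sketch (assumed, matching the
# call signature used in `_compute_loss` above, not the exact original):
#
#   def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
#       """Return (smoothed_loss, nll_loss) given log-probabilities `lprobs`."""
#       if target.dim() == lprobs.dim() - 1:
#           target = target.unsqueeze(-1)
#       nll_loss = -lprobs.gather(dim=-1, index=target)
#       smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
#       pad_mask = target.eq(ignore_index)
#       nll_loss.masked_fill_(pad_mask, 0.0)
#       smooth_loss.masked_fill_(pad_mask, 0.0)
#       nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
#       eps_i = epsilon / lprobs.size(-1)
#       loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
#       return loss, nll_loss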
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
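# Worked check (added for illustration): for the docstring example above,
# pair 1 ("this is the prediction" vs "this is the reference") yields
# S=1, D=0, I=0, C=3, and pair 2 ("there is an other sample" vs
# "there is another one") yields S=2, D=0, I=1, C=2. Summing over both pairs,
# WER = (S + D + I) / (S + D + C) = (3 + 0 + 1) / (3 + 0 + 5) = 4 / 8 = 0.5,
# which matches the documented output.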
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches the prompt from a repo and returns its contents (if necessary)."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
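# Illustrative usage (added; not part of the original module): a literal prompt
# (anything containing whitespace) is returned unchanged, while a bare repo id
# triggers a cached download from the Hub.
#
#   template = download_prompt(None, agent_name="MyAgent", mode="chat")
#   literal = download_prompt("Human: <<task>>\nAssistant:", agent_name="MyAgent")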
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])
    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
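# Why ceil(precision / 14) iterations: each term of the Chudnovsky series adds
# roughly 14.18 correct decimal digits, so that many terms suffice. Quick
# sanity check (added for illustration): with precision=10 the function is
# expected to return "3.14159265" (ten significant digits, last one dropped).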
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple:
        """Return (common substring, remaining prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
def solution(pence: int = 200) -> int:
    """Return the number of ways `pence` pence can be made from standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
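# Worked example (added for illustration): the inner loop implements the
# recurrence ways[i] += ways[i - coin], one coin denomination at a time, so the
# order of coins inside a combination doesn't matter. For pence=5 and coins
# {1, 2, 5} this counts 4 ways: 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1.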
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
a__ = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"])

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_full_tokenizer_moses(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
from manim import *
class Stage5(Scene):  # class name assumed; `construct` is the entry point manim invokes
    def construct(self):
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
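# Quick check (added for illustration): a 90-degree arc of a radius-10 circle
# has length 2*pi*10*(90/360) = 5*pi, approximately 15.707963, which is what
# the call above prints.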
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Dict = KandinskyVaaPipeline
UpperCAmelCase__ : Union[str, Any] = [
'''image_embeds''',
'''negative_image_embeds''',
]
UpperCAmelCase__ : Any = ['''image_embeds''', '''negative_image_embeds''']
UpperCAmelCase__ : Tuple = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ : Any = False
@property
def lowerCamelCase__( self :Tuple ) -> Dict:
return 32
@property
def lowerCamelCase__( self :List[Any] ) -> Any:
return 32
@property
def lowerCamelCase__( self :Any ) -> Any:
return self.time_input_dim
@property
def lowerCamelCase__( self :int ) -> Tuple:
return self.time_input_dim * 4
@property
def lowerCamelCase__( self :str ) -> Optional[Any]:
return 1_00
@property
def lowerCamelCase__( self :List[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
a__ = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
a__ = UNetaDConditionModel(**__snake_case )
return model
@property
def lowerCamelCase__( self :int ) -> Any:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase__( self :str ) -> Dict:
torch.manual_seed(0 )
a__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase__( self :Tuple ) -> str:
a__ = self.dummy_unet
a__ = self.dummy_movq
a__ = DDIMScheduler(
num_train_timesteps=10_00 ,beta_schedule='linear' ,beta_start=0.0_00_85 ,beta_end=0.0_12 ,clip_sample=__snake_case ,set_alpha_to_one=__snake_case ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=__snake_case ,)
a__ = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase__( self :Tuple ,__snake_case :Tuple ,__snake_case :List[str]=0 ) -> Dict:
a__ = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(__snake_case ) ).to(__snake_case )
a__ = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
__snake_case )
if str(__snake_case ).startswith('mps' ):
a__ = torch.manual_seed(__snake_case )
else:
a__ = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a__ = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def lowerCamelCase__( self :Any ) -> Optional[int]:
a__ = 'cpu'
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**__snake_case )
a__ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
a__ = pipe(**self.get_dummy_inputs(__snake_case ) )
a__ = output.images
a__ = pipe(
**self.get_dummy_inputs(__snake_case ) ,return_dict=__snake_case ,)[0]
a__ = image[0, -3:, -3:, -1]
a__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__ = np.array(
[0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Any ) -> Optional[int]:
a__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' )
a__ = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
a__ = KandinskyVaaPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' ,torch_dtype=torch.floataa )
a__ = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
a__ = 'red cat, 4k photo'
a__ = torch.Generator(device='cuda' ).manual_seed(0 )
a__ , a__ = pipe_prior(
__snake_case ,generator=__snake_case ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
a__ = torch.Generator(device='cuda' ).manual_seed(0 )
a__ = pipeline(
image_embeds=__snake_case ,negative_image_embeds=__snake_case ,generator=__snake_case ,num_inference_steps=1_00 ,output_type='np' ,)
a__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__snake_case ,__snake_case )
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f"""{solution() = }""")
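# Quick check (added for illustration): solution(6) returns 13 (the 6th prime),
# and the default solution() returns the 10001st prime, 104743.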
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case_ :
def __init__( self :List[Any] ,__snake_case :Optional[int] ,__snake_case :List[str]=13 ,__snake_case :Optional[int]=10 ,__snake_case :List[Any]=3 ,__snake_case :List[Any]=2 ,__snake_case :Optional[int]=2 ,__snake_case :int=2 ,__snake_case :str=True ,__snake_case :Any=True ,__snake_case :int=32 ,__snake_case :List[str]=5 ,__snake_case :Optional[int]=4 ,__snake_case :Optional[Any]=37 ,__snake_case :Optional[Any]="gelu" ,__snake_case :int=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=10 ,__snake_case :Dict=0.02 ,__snake_case :List[str]=0.9 ,__snake_case :Optional[int]=None ,) -> str:
a__ = parent
a__ = batch_size
a__ = image_size
a__ = num_channels
a__ = patch_size
a__ = tubelet_size
a__ = num_frames
a__ = is_training
a__ = use_labels
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = type_sequence_label_size
a__ = initializer_range
a__ = mask_ratio
a__ = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
a__ = (image_size // patch_size) ** 2
a__ = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
a__ = int(mask_ratio * self.seq_length )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__( self :Any ) -> Optional[Any]:
return VideoMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,tubelet_size=self.tubelet_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__snake_case ,initializer_range=self.initializer_range ,)
def lowerCamelCase__( self :Tuple ,__snake_case :List[Any] ,__snake_case :Any ,__snake_case :Union[str, Any] ) -> Any:
a__ = VideoMAEModel(config=__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Optional[int] ,__snake_case :Dict ) -> Union[str, Any]:
a__ = VideoMAEForPreTraining(__snake_case )
model.to(__snake_case )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
a__ = torch.ones((self.num_masks,) )
a__ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
a__ = mask.expand(self.batch_size ,-1 ).bool()
a__ = model(__snake_case ,__snake_case )
# model only returns predictions for masked patches
a__ = mask.sum().item()
a__ = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_masked_patches, decoder_num_labels) )
def lowerCamelCase__( self :Any ) -> Optional[int]:
a__ = self.prepare_config_and_inputs()
a__ , a__ , a__ = config_and_inputs
a__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : int = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
UpperCAmelCase__ : Dict = (
{'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Any = False
def lowerCamelCase__( self :Optional[int] ) -> Optional[int]:
a__ = VideoMAEModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,has_text_modality=__snake_case ,hidden_size=37 )
def lowerCamelCase__( self :Any ,__snake_case :List[Any] ,__snake_case :List[str] ,__snake_case :List[Any]=False ) -> Optional[int]:
a__ = copy.deepcopy(__snake_case )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
a__ = torch.ones((self.model_tester.num_masks,) )
a__ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
a__ = mask.expand(self.model_tester.batch_size ,-1 ).bool()
a__ = bool_masked_pos.to(__snake_case )
if return_labels:
if model_class in [
*get_values(__snake_case ),
]:
a__ = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__snake_case )
return inputs_dict
def lowerCamelCase__( self :Dict ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def lowerCamelCase__( self :List[Any] ) -> Tuple:
pass
def lowerCamelCase__( self :Optional[Any] ) -> int:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
a__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case ,nn.Linear ) )
def lowerCamelCase__( self :Tuple ) -> Any:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
a__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__snake_case )
def lowerCamelCase__( self :int ) -> List[str]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCamelCase__( self :List[str] ) -> Optional[int]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__snake_case )
@slow
def lowerCamelCase__( self :str ) -> Optional[Any]:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = VideoMAEModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowerCamelCase__( self :Tuple ) -> Any:
if not self.has_attentions:
pass
else:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = True
for model_class in self.all_model_classes:
a__ = self.model_tester.seq_length - self.model_tester.num_masks
a__ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
a__ = True
a__ = False
a__ = True
a__ = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(__snake_case ,__snake_case ) )
a__ = outputs.attentions
self.assertEqual(len(__snake_case ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a__ = True
a__ = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(__snake_case ,__snake_case ) )
a__ = outputs.attentions
self.assertEqual(len(__snake_case ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
a__ = len(__snake_case )
# Check attention is always last and order is fine
a__ = True
a__ = True
a__ = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(__snake_case ,__snake_case ) )
self.assertEqual(out_len + 1 ,len(__snake_case ) )
a__ = outputs.attentions
self.assertEqual(len(__snake_case ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def lowerCamelCase__( self :List[Any] ) -> Optional[Any]:
def check_hidden_states_output(__snake_case :Dict ,__snake_case :str ,__snake_case :Union[str, Any] ):
a__ = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(__snake_case ,__snake_case ) )
a__ = outputs.hidden_states
a__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__snake_case ) ,__snake_case )
a__ = self.model_tester.seq_length - self.model_tester.num_masks
a__ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = True
check_hidden_states_output(__snake_case ,__snake_case ,__snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ = True
check_hidden_states_output(__snake_case ,__snake_case ,__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Dict ) -> Optional[int]:
pass
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class snake_case_ (unittest.TestCase ):
@cached_property
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__( self :List[str] ) -> List[Any]:
a__ = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
__snake_case )
a__ = self.default_image_processor
a__ = prepare_video()
a__ = image_processor(__snake_case ,return_tensors='pt' ).to(__snake_case )
# forward pass
with torch.no_grad():
a__ = model(**__snake_case )
# verify the logits
a__ = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape ,__snake_case )
a__ = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__snake_case ,atol=1E-4 ) )
@slow
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[int]:
a__ = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(__snake_case )
a__ = self.default_image_processor
a__ = prepare_video()
a__ = image_processor(__snake_case ,return_tensors='pt' ).to(__snake_case )
# add boolean mask, indicating which patches to mask
a__ = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' ,filename='bool_masked_pos.pt' )
a__ = torch.load(__snake_case )
# forward pass
with torch.no_grad():
a__ = model(**__snake_case )
# verify the logits
a__ = torch.Size([1, 14_08, 15_36] )
a__ = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ,device=__snake_case )
self.assertEqual(outputs.logits.shape ,__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,__snake_case ,atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
a__ = torch.tensor([0.51_42] ,device=__snake_case )
self.assertTrue(torch.allclose(outputs.loss ,__snake_case ,atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
a__ = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ,norm_pix_loss=__snake_case ).to(
__snake_case )
with torch.no_grad():
a__ = model(**__snake_case )
a__ = torch.tensor(torch.tensor([0.64_69] ) ,device=__snake_case )
self.assertTrue(torch.allclose(outputs.loss ,__snake_case ,atol=1E-4 ) )
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        # kp.calc_profit takes (profit, weight, max_weight) and the answer
        # should match the expected value
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # Returns ValueError for any negative max_weight value
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        # Returns ValueError for any negative weight value
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        # Returns ValueError for any negative profit value
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        # Returns ValueError for a zero max_weight value
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        # Returns IndexError when the profit and weight lists differ in length
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
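# For reference (assumed; `knapsack.greedy_knapsack` is not part of this file):
# `calc_profit` implements a greedy fractional-knapsack strategy roughly like
# the sketch below: validate inputs, then take items in decreasing
# profit/weight ratio until the capacity is exhausted.
#
#   def calc_profit(profit, weight, max_weight):
#       if len(profit) != len(weight):
#           raise IndexError("The length of profit and weight must be same.")
#       if max_weight <= 0:
#           raise ValueError("max_weight must greater than zero.")
#       if any(p < 0 for p in profit):
#           raise ValueError("Profit can not be negative.")
#       if any(w < 0 for w in weight):
#           raise ValueError("Weight can not be negative.")
#       limit, gain = 0, 0
#       for ratio, w, p in sorted(
#           ((p / w, w, p) for p, w in zip(profit, weight)), reverse=True
#       ):
#           if limit + w <= max_weight:
#               limit, gain = limit + w, gain + p
#           else:
#               gain += ratio * (max_weight - limit)
#               break
#       return gain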
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 657 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any=1_0 ):
a__ = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=1_0 ):
a__ = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
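            # halfway through, round-trip the scheduler state through disk to exercise save/load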
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__lowerCAmelCase , 'schedule.bin' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class snake_case_ :
def __init__( self :Tuple ,__snake_case :str ) -> Any:
a__ = fn
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
return self.fn(*__snake_case ,**__snake_case )
@classmethod
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ) -> Dict:
        __snake_case.lr_lambdas = list(map(self ,__snake_case.lr_lambdas ) )
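        # Rationale sketch (inferred from the test above): LambdaLR only serializes lr_lambdas
        # that are callable objects, not plain functions or lambdas, so wrapping each one in
        # this class makes the schedule picklable for the save-and-reload check.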
| 657 | 1 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
snake_case : str = True
except (ImportError, AttributeError):
snake_case : Dict = object
def __lowercase ( *__lowerCAmelCase : Any , **__lowerCAmelCase : Optional[Any] ):
pass
snake_case : Dict = False
snake_case : Dict = logging.get_logger('''transformers-cli/serving''')
def __lowercase ( __lowerCAmelCase : Namespace ):
a__ = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(__lowerCAmelCase , args.host , args.port , args.workers )
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : dict
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : List[str]
UpperCAmelCase__ : Optional[List[int]]
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : str
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Any
class snake_case_ (lowerCamelCase_ ):
@staticmethod
def lowerCamelCase__( __snake_case :ArgumentParser ) -> Any:
a__ = parser.add_parser(
'serve' ,help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
serve_parser.add_argument(
'--task' ,type=__snake_case ,choices=get_supported_tasks() ,help='The task to run the pipeline on' ,)
serve_parser.add_argument('--host' ,type=__snake_case ,default='localhost' ,help='Interface the server will listen on.' )
serve_parser.add_argument('--port' ,type=__snake_case ,default=88_88 ,help='Port the serving will listen to.' )
serve_parser.add_argument('--workers' ,type=__snake_case ,default=1 ,help='Number of http workers' )
serve_parser.add_argument('--model' ,type=__snake_case ,help='Model\'s name or path to stored model.' )
serve_parser.add_argument('--config' ,type=__snake_case ,help='Model\'s config name or path to stored model.' )
serve_parser.add_argument('--tokenizer' ,type=__snake_case ,help='Tokenizer name to use.' )
serve_parser.add_argument(
'--device' ,type=__snake_case ,default=-1 ,help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' ,)
serve_parser.set_defaults(func=__snake_case )
def __init__( self :int ,__snake_case :Pipeline ,__snake_case :str ,__snake_case :int ,__snake_case :int ) -> Optional[int]:
a__ = pipeline
a__ = host
a__ = port
a__ = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'Using serve command requires FastAPI and uvicorn. '
'Please install transformers with [serving]: pip install "transformers[serving]".'
'Or install FastAPI and uvicorn separately.' )
else:
logger.info(F'Serving model over {host}:{port}' )
a__ = FastAPI(
routes=[
APIRoute(
'/' ,self.model_info ,response_model=__snake_case ,response_class=__snake_case ,methods=['GET'] ,),
APIRoute(
'/tokenize' ,self.tokenize ,response_model=__snake_case ,response_class=__snake_case ,methods=['POST'] ,),
APIRoute(
'/detokenize' ,self.detokenize ,response_model=__snake_case ,response_class=__snake_case ,methods=['POST'] ,),
APIRoute(
'/forward' ,self.forward ,response_model=__snake_case ,response_class=__snake_case ,methods=['POST'] ,),
] ,timeout=6_00 ,)
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[int]:
run(self._app ,host=self.host ,port=self.port ,workers=self.workers )
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowerCamelCase__( self :str ,__snake_case :str = Body(__snake_case ,embed=__snake_case ) ,__snake_case :bool = Body(__snake_case ,embed=__snake_case ) ) -> Dict:
try:
a__ = self._pipeline.tokenizer.tokenize(__snake_case )
if return_ids:
a__ = self._pipeline.tokenizer.convert_tokens_to_ids(__snake_case )
return ServeTokenizeResult(tokens=__snake_case ,tokens_ids=__snake_case )
else:
return ServeTokenizeResult(tokens=__snake_case )
except Exception as e:
raise HTTPException(status_code=5_00 ,detail={'model': '', 'error': str(__snake_case )} )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :List[int] = Body(__snake_case ,embed=__snake_case ) ,__snake_case :bool = Body(__snake_case ,embed=__snake_case ) ,__snake_case :bool = Body(__snake_case ,embed=__snake_case ) ,) -> str:
try:
a__ = self._pipeline.tokenizer.decode(__snake_case ,__snake_case ,__snake_case )
return ServeDeTokenizeResult(model='' ,text=__snake_case )
except Exception as e:
raise HTTPException(status_code=5_00 ,detail={'model': '', 'error': str(__snake_case )} )
async def lowerCamelCase__( self :Tuple ,__snake_case :List[Any]=Body(__snake_case ,embed=__snake_case ) ) -> Dict:
# Check we don't have empty string
if len(__snake_case ) == 0:
return ServeForwardResult(output=[] ,attention=[] )
try:
# Forward through the model
a__ = self._pipeline(__snake_case )
return ServeForwardResult(output=__snake_case )
except Exception as e:
raise HTTPException(5_00 ,{'error': str(__snake_case )} )
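# Typical invocation sketch (flags as registered above; the model name is illustrative):
#   transformers-cli serve --task text-classification --model distilbert-base-uncased --port 8888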
| 657 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
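    # Minimal usage sketch (input values are illustrative):
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))  # [10, 22, 33, 41, 60, 80]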
| 657 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = (EulerDiscreteScheduler,)
UpperCAmelCase__ : Dict = 1_0
def lowerCamelCase__( self :Tuple ,**__snake_case :Dict ) -> int:
        config = {
'num_train_timesteps': 11_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**__snake_case )
return config
def lowerCamelCase__( self :str ) -> int:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] ,[0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__snake_case ,beta_end=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[str]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__snake_case )
def lowerCamelCase__( self :List[str] ) -> Union[str, Any]:
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config()
a__ = scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps )
a__ = torch.manual_seed(0 )
a__ = self.dummy_model()
a__ = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ = sample.to(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
a__ = scheduler.scale_model_input(__snake_case ,__snake_case )
a__ = model(__snake_case ,__snake_case )
a__ = scheduler.step(__snake_case ,__snake_case ,__snake_case ,generator=__snake_case )
a__ = output.prev_sample
a__ = torch.sum(torch.abs(__snake_case ) )
a__ = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 10.08_07 ) < 1E-2
assert abs(result_mean.item() - 0.01_31 ) < 1E-3
def lowerCamelCase__( self :List[Any] ) -> Tuple:
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config(prediction_type='v_prediction' )
a__ = scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps )
a__ = torch.manual_seed(0 )
a__ = self.dummy_model()
a__ = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ = sample.to(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
a__ = scheduler.scale_model_input(__snake_case ,__snake_case )
a__ = model(__snake_case ,__snake_case )
a__ = scheduler.step(__snake_case ,__snake_case ,__snake_case ,generator=__snake_case )
a__ = output.prev_sample
a__ = torch.sum(torch.abs(__snake_case ) )
a__ = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 0.00_02 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def lowerCamelCase__( self :Dict ) -> Optional[int]:
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config()
a__ = scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps ,device=__snake_case )
a__ = torch.manual_seed(0 )
a__ = self.dummy_model()
a__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
a__ = sample.to(__snake_case )
for t in scheduler.timesteps:
a__ = scheduler.scale_model_input(__snake_case ,__snake_case )
a__ = model(__snake_case ,__snake_case )
a__ = scheduler.step(__snake_case ,__snake_case ,__snake_case ,generator=__snake_case )
a__ = output.prev_sample
a__ = torch.sum(torch.abs(__snake_case ) )
a__ = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 10.08_07 ) < 1E-2
assert abs(result_mean.item() - 0.01_31 ) < 1E-3
def lowerCamelCase__( self :Dict ) -> Any:
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config()
a__ = scheduler_class(**__snake_case ,use_karras_sigmas=__snake_case )
scheduler.set_timesteps(self.num_inference_steps ,device=__snake_case )
a__ = torch.manual_seed(0 )
a__ = self.dummy_model()
a__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
a__ = sample.to(__snake_case )
for t in scheduler.timesteps:
a__ = scheduler.scale_model_input(__snake_case ,__snake_case )
a__ = model(__snake_case ,__snake_case )
a__ = scheduler.step(__snake_case ,__snake_case ,__snake_case ,generator=__snake_case )
a__ = output.prev_sample
a__ = torch.sum(torch.abs(__snake_case ) )
a__ = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1E-2
assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1E-3
| 657 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''pixel_values''']
def __init__( self :Optional[Any] ,__snake_case :bool = True ,__snake_case :int = 32 ,__snake_case :Union[str, Any]=PILImageResampling.BILINEAR ,__snake_case :bool = True ,**__snake_case :Tuple ,) -> None:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :int ,__snake_case :Tuple ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :List[Any] ) -> np.ndarray:
a__ , a__ = get_image_size(__snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
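        # e.g. height=225 with size_divisor=32 (illustrative values): 225 // 32 * 32 == 224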
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(__snake_case ,(new_h, new_w) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
return image
def lowerCamelCase__( self :List[str] ,__snake_case :np.ndarray ,__snake_case :float ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :str ) -> np.ndarray:
return rescale(image=__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,__snake_case :Optional[bool] = None ,__snake_case :Optional[int] = None ,__snake_case :Union[str, Any]=None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[TensorType, str]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :List[Any] ,) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a__ = [self.resize(__snake_case ,size_divisor=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
a__ = [self.rescale(__snake_case ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'pixel_values': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
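# Minimal usage sketch (the processor class is the one defined above; inputs are illustrative):
#   processor = snake_case_(do_resize=True, size_divisor=32)
#   batch = processor(images, return_tensors='np')  # heights/widths become multiples of 32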
| 657 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
snake_case : Optional[int] = get_tests_dir('''fixtures''')
snake_case : Tuple = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
snake_case : str = get_tests_dir('''fixtures/dummy-config.json''')
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :List[Any] ) -> Union[str, Any]:
a__ = 0
def lowerCamelCase__( self :Any ) -> List[str]:
a__ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Dict ) -> str:
a__ = AutoFeatureExtractor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
a__ = AutoFeatureExtractor.from_pretrained(__snake_case ).to_dict()
config_dict.pop('feature_extractor_type' )
a__ = WavaVecaFeatureExtractor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
a__ = AutoFeatureExtractor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
a__ = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Tuple ) -> Optional[Any]:
a__ = AutoFeatureExtractor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :List[str] ) -> Tuple:
with self.assertRaisesRegex(
__snake_case ,'bert-base is not a local folder and is not a valid model identifier' ):
a__ = AutoFeatureExtractor.from_pretrained('bert-base' )
def lowerCamelCase__( self :List[str] ) -> Dict:
with self.assertRaisesRegex(
__snake_case ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
a__ = AutoFeatureExtractor.from_pretrained(__snake_case ,revision='aaaaaa' )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
with self.assertRaisesRegex(
__snake_case ,'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' ,):
a__ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def lowerCamelCase__( self :Optional[int] ) -> str:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
a__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
a__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=__snake_case )
a__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=__snake_case )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__snake_case )
a__ = AutoFeatureExtractor.from_pretrained(__snake_case ,trust_remote_code=__snake_case )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
try:
AutoConfig.register('custom' ,__snake_case )
AutoFeatureExtractor.register(__snake_case ,__snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoFeatureExtractor.register(__snake_case ,__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
a__ = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__snake_case )
a__ = AutoFeatureExtractor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__( self :List[Any] ) -> Any:
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = True
try:
AutoConfig.register('custom' ,__snake_case )
AutoFeatureExtractor.register(__snake_case ,__snake_case )
# If remote code is not set, the default is to use local
a__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
a__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=__snake_case )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
a__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=__snake_case )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(not hasattr(__snake_case ,'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 657 |
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F'{func.__name__}({value})'
        timing = timeit(F'__main__.{call}', setup='import __main__')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F'{call:38} -- {timing:.4f} seconds')
    for value in range(1_5):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
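# For example, generate_pascal_triangle(4) returns [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]].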
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Dict = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[str] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(__lowerCAmelCase ) ), randrange(len(__lowerCAmelCase ) )
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
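    # (play >= oppo) + (play > oppo) maps a loss to 0, a tie to 1 and a win to 2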
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
a__ = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
a__ = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
a__ = line[:1_4].strip()
a__ = line[1_5:].strip()
a__ , a__ = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
a__ = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 657 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert('RGB')) for img in image]
    image = torch.stack(image)
    return image
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Optional[Any] ,__snake_case :Any ,__snake_case :List[Any] ) -> Optional[int]:
super().__init__()
# make sure scheduler can always be converted to DDIM
a__ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__snake_case ,scheduler=__snake_case )
def lowerCamelCase__( self :Dict ,__snake_case :str ) -> Tuple:
if strength < 0 or strength > 1:
raise ValueError(F'The value of strength should in [0.0, 1.0] but is {strength}' )
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ,__snake_case :Optional[int] ,__snake_case :Tuple ) -> Any:
# get the original timestep using init_timestep
a__ = min(int(num_inference_steps * strength ) ,__snake_case )
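        # e.g. num_inference_steps=50 with strength=0.8 (illustrative): init_timestep=40,
        # so denoising runs over the last 40 of the 50 scheduled timesteps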
a__ = max(num_inference_steps - init_timestep ,0 )
a__ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__( self :Dict ,__snake_case :Tuple ,__snake_case :Optional[int] ,__snake_case :str ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Any=None ) -> Optional[int]:
if not isinstance(__snake_case ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__snake_case )}' )
a__ = image.to(device=__snake_case ,dtype=__snake_case )
if isinstance(__snake_case ,__snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
a__ = init_latents.shape
a__ = randn_tensor(__snake_case ,generator=__snake_case ,device=__snake_case ,dtype=__snake_case )
# get latents
print('add noise to latents at timestep' ,__snake_case )
a__ = self.scheduler.add_noise(__snake_case ,__snake_case ,__snake_case )
a__ = init_latents
return latents
@torch.no_grad()
def __call__( self :Any ,__snake_case :Union[torch.FloatTensor, PIL.Image.Image] = None ,__snake_case :float = 0.8 ,__snake_case :int = 1 ,__snake_case :Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__snake_case :float = 0.0 ,__snake_case :int = 50 ,__snake_case :Optional[bool] = None ,__snake_case :Optional[str] = "pil" ,__snake_case :bool = True ,) -> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(__snake_case )
# 2. Preprocess image
a__ = preprocess(__snake_case )
# 3. set timesteps
self.scheduler.set_timesteps(__snake_case ,device=self.device )
a__ , a__ = self.get_timesteps(__snake_case ,__snake_case ,self.device )
a__ = timesteps[:1].repeat(__snake_case )
# 4. Prepare latent variables
a__ = self.prepare_latents(__snake_case ,__snake_case ,__snake_case ,self.unet.dtype ,self.device ,__snake_case )
a__ = latents
# 5. Denoising loop
for t in self.progress_bar(__snake_case ):
# 1. predict noise model_output
a__ = self.unet(__snake_case ,__snake_case ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
a__ = self.scheduler.step(
__snake_case ,__snake_case ,__snake_case ,eta=__snake_case ,use_clipped_model_output=__snake_case ,generator=__snake_case ,).prev_sample
a__ = (image / 2 + 0.5).clamp(0 ,1 )
a__ = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
a__ = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__snake_case )
| 657 |
def hexagonal_numbers(length: int) -> list[int]:
    if not isinstance(length, int) or length <= 0:
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
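# First terms of n * (2 * n - 1) for n = 0..4: [0, 1, 6, 15, 28].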
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 | 1 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=None , __lowerCAmelCase : str=None , __lowerCAmelCase : str=None , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Tuple=None , ):
if attention_mask is None:
a__ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
a__ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
a__ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__lowerCAmelCase )
if decoder_head_mask is None:
a__ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__lowerCAmelCase )
if cross_attn_head_mask is None:
a__ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__lowerCAmelCase )
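    # all-ones masks are defaults that keep every attention head active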
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class snake_case_ :
def __init__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Dict=13 ,__snake_case :str=7 ,__snake_case :Tuple=True ,__snake_case :Optional[int]=False ,__snake_case :List[Any]=99 ,__snake_case :str=16 ,__snake_case :Optional[Any]=2 ,__snake_case :List[str]=4 ,__snake_case :Optional[Any]=4 ,__snake_case :int="relu" ,__snake_case :Dict=0.1 ,__snake_case :List[str]=0.1 ,__snake_case :Union[str, Any]=0.0 ,__snake_case :Any=0.0 ,__snake_case :List[str]=20 ,__snake_case :Tuple=2 ,__snake_case :Dict=1 ,__snake_case :Optional[Any]=0 ,) -> List[Any]:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = encoder_layerdrop
a__ = decoder_layerdrop
a__ = max_position_embeddings
a__ = eos_token_id
a__ = pad_token_id
a__ = bos_token_id
def lowerCamelCase__( self :Any ) -> List[str]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = self.eos_token_id # Eos Token
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
a__ = input_ids.clamp(self.pad_token_id + 1 )
a__ = decoder_input_ids.clamp(self.pad_token_id + 1 )
a__ = self.get_config()
a__ = prepare_mam_aaa_inputs_dict(__snake_case ,__snake_case ,__snake_case )
return config, inputs_dict
def lowerCamelCase__( self :List[str] ) -> str:
return MaMaaaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,encoder_layerdrop=self.encoder_layerdrop ,decoder_layerdrop=self.decoder_layerdrop ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :int ) -> str:
a__ , a__ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Dict ,__snake_case :Optional[Any] ) -> Union[str, Any]:
a__ = MaMaaaModel(config=__snake_case ).get_decoder().to(__snake_case ).eval()
a__ = inputs_dict['input_ids']
a__ = inputs_dict['attention_mask']
a__ = inputs_dict['head_mask']
# first forward pass
a__ = model(__snake_case ,attention_mask=__snake_case ,head_mask=__snake_case ,use_cache=__snake_case )
a__ , a__ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
a__ = ids_tensor((self.batch_size, 3) ,config.vocab_size )
a__ = ids_tensor((self.batch_size, 3) ,2 )
# append to next input_ids and
a__ = torch.cat([input_ids, next_tokens] ,dim=-1 )
a__ = torch.cat([attention_mask, next_attn_mask] ,dim=-1 )
a__ = model(__snake_case ,attention_mask=__snake_case )['last_hidden_state']
a__ = model(__snake_case ,attention_mask=__snake_case ,past_key_values=__snake_case )[
'last_hidden_state'
]
# select random slice
a__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
a__ = output_from_no_past[:, -3:, random_slice_idx].detach()
a__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__snake_case ,__snake_case ,atol=1E-2 ) )
def lowerCamelCase__( self :Tuple ,__snake_case :Any ,__snake_case :Dict ) -> Optional[int]:
a__ = MaMaaaModel(config=__snake_case ).to(__snake_case ).eval()
a__ = model(**__snake_case )
a__ = outputs.encoder_last_hidden_state
a__ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = model.get_encoder()
encoder.save_pretrained(__snake_case )
a__ = MaMaaaEncoder.from_pretrained(__snake_case ).to(__snake_case )
a__ = encoder(inputs_dict['input_ids'] ,attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = model.get_decoder()
decoder.save_pretrained(__snake_case )
a__ = MaMaaaDecoder.from_pretrained(__snake_case ).to(__snake_case )
a__ = decoder(
input_ids=inputs_dict['decoder_input_ids'] ,attention_mask=inputs_dict['decoder_attention_mask'] ,encoder_hidden_states=__snake_case ,encoder_attention_mask=inputs_dict['attention_mask'] ,)[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Optional[int] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : int = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase__ : str = (
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Union[str, Any] = False
def lowerCamelCase__( self :Dict ,__snake_case :int ,__snake_case :Any ,__snake_case :List[Any] ,__snake_case :List[str] ,__snake_case :List[Any] ) -> Tuple:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowerCamelCase__( self :List[Any] ) -> Dict:
a__ = MaMaaaModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case )
def lowerCamelCase__( self :int ) -> Dict:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :List[str] ) -> List[Any]:
a__ , a__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__snake_case )
a__ , a__ = model_class.from_pretrained(__snake_case ,output_loading_info=__snake_case )
self.assertEqual(info['missing_keys'] ,[] )
def lowerCamelCase__( self :Tuple ) -> Optional[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__snake_case )
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
a__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__snake_case )
def lowerCamelCase__( self :Union[str, Any] ) -> str:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
a__ = model_class(__snake_case )
model.to(__snake_case )
model.eval()
a__ = copy.deepcopy(self._prepare_for_class(__snake_case ,__snake_case ) )
if not self.is_encoder_decoder:
a__ = inputs['input_ids']
del inputs["input_ids"]
else:
a__ = inputs['input_ids']
a__ = inputs.get('decoder_input_ids' ,__snake_case )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' ,__snake_case )
a__ = model.get_input_embeddings()
if not self.is_encoder_decoder:
a__ = wte(__snake_case )
else:
a__ = wte(__snake_case )
a__ = wte(__snake_case )
with torch.no_grad():
model(**__snake_case )[0]
def lowerCamelCase__( self :Any ) -> Tuple:
a__ , a__ = self.model_tester.prepare_config_and_inputs()
a__ = input_dict['input_ids']
a__ = input_ids.ne(1 ).to(__snake_case )
a__ = MaMaaaForConditionalGeneration(__snake_case ).eval().to(__snake_case )
if torch_device == "cuda":
model.half()
model.generate(__snake_case ,attention_mask=__snake_case )
model.generate(num_beams=4 ,do_sample=__snake_case ,early_stopping=__snake_case ,num_return_sequences=3 )
def __lowercase ( __lowerCAmelCase : Optional[int] ):
return torch.tensor(__lowerCAmelCase , dtype=torch.long , device=__lowerCAmelCase )
snake_case : Any = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class snake_case_ (unittest.TestCase ):
@cached_property
def lowerCamelCase__( self :Optional[int] ) -> Optional[Any]:
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[Any]:
a__ = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(__snake_case )
a__ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
a__ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
a__ = prepare_mam_aaa_inputs_dict(model.config ,__snake_case ,__snake_case )
with torch.no_grad():
a__ = model(**__snake_case )[0]
a__ = torch.Size((1, 11, 10_24) )
self.assertEqual(output.shape ,__snake_case )
# change to expected output here
a__ = torch.tensor(
[[-0.77_80, -0.16_76, 0.10_38], [-6.75_56, -1.39_92, 0.05_67], [-7.53_83, -0.59_20, -0.27_79]] ,device=__snake_case )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__snake_case ,atol=__snake_case ) )
def lowerCamelCase__( self :Tuple ) -> Optional[int]:
a__ = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(__snake_case )
# change to intended input
a__ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
a__ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
a__ = prepare_mam_aaa_inputs_dict(model.config ,__snake_case ,__snake_case )
with torch.no_grad():
a__ = model(**__snake_case )[0]
a__ = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape ,__snake_case )
# change to expected output here
a__ = torch.tensor(
[[-1.04_48, -1.04_11, 3.79_92], [-3.21_91, -3.23_86, -1.34_51], [-3.62_10, -3.59_93, 0.49_25]] ,device=__snake_case )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__snake_case ,atol=__snake_case ) )
def lowerCamelCase__( self :Dict ) -> Union[str, Any]:
a__ = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(__snake_case )
a__ = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' ,src_lang='fr' ,tgt_lang='en' )
a__ = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
a__ = tokenizer(__snake_case ,padding=__snake_case ,return_tensors='pt' )
a__ = model.generate(
input_ids=dct['input_ids'].to(__snake_case ) ,attention_mask=dct['attention_mask'].to(__snake_case ) ,num_beams=5 ,forced_bos_token_id=tokenizer.get_lang_id('en' ) ,)
a__ = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
a__ = tokenizer.batch_decode(
hypotheses_batch.tolist() ,clean_up_tokenization_spaces=__snake_case ,skip_special_tokens=__snake_case )
assert generated == expected_en
| 657 |
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
    # Build a list of the profit gained per 1 kg for each item, i.e. the
    # profit/weight ratio of every element.
a__ = [p / w for p, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
a__ = sorted(__lowerCAmelCase )
# declaring useful variables
a__ = len(__lowerCAmelCase )
a__ = 0
a__ = 0
a__ = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
while limit <= max_weight and i < length:
        # pick the largest remaining profit/weight ratio in sorted_profit_by_weight
a__ = sorted_profit_by_weight[length - i - 1]
a__ = profit_by_weight.index(__lowerCAmelCase )
a__ = -1
        # check whether the whole item still fits within the remaining
        # capacity.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Add the full profit of the item, since
            # weight[index]/weight[index] == 1
gain += 1 * profit[index]
else:
            # The item does not fit entirely, so fill the remaining capacity
            # and add the proportional share of its profit:
            # (remaining weight / weight[index]) * profit[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
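# A minimal worked example (a sketch with made-up values, not part of the
# original demo below):
#   profits [10, 9, 8], weights [5, 9, 6], max_weight 10
#   ratios are [2.0, 1.0, 1.33...]; the whole first item is taken (gain 10,
#   5 kg of capacity left), then 5/6 of the third item (gain 8 * 5/6), so
#   calc_profit([10, 9, 8], [5, 9, 6], 10) returns ~16.67.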
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
snake_case : List[str] = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = '''upernet'''
def __init__( self :Union[str, Any] ,__snake_case :Optional[Any]=None ,__snake_case :Optional[Any]=5_12 ,__snake_case :List[Any]=0.02 ,__snake_case :List[Any]=[1, 2, 3, 6] ,__snake_case :Union[str, Any]=True ,__snake_case :Union[str, Any]=0.4 ,__snake_case :List[str]=3_84 ,__snake_case :Union[str, Any]=2_56 ,__snake_case :int=1 ,__snake_case :Any=False ,__snake_case :str=2_55 ,**__snake_case :List[str] ,) -> List[str]:
super().__init__(**__snake_case )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
a__ = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(__snake_case ,__snake_case ):
a__ = backbone_config.get('model_type' )
a__ = CONFIG_MAPPING[backbone_model_type]
a__ = config_class.from_dict(__snake_case )
a__ = backbone_config
a__ = hidden_size
a__ = initializer_range
a__ = pool_scales
a__ = use_auxiliary_head
a__ = auxiliary_loss_weight
a__ = auxiliary_in_channels
a__ = auxiliary_channels
a__ = auxiliary_num_convs
a__ = auxiliary_concat_input
a__ = loss_ignore_index
def lowerCamelCase__( self :List[str] ) -> str:
a__ = copy.deepcopy(self.__dict__ )
a__ = self.backbone_config.to_dict()
a__ = self.__class__.model_type
return output
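# Minimal usage sketch (illustrative only, using the anonymized names defined above):
# config = snake_case_()          # backbone_config=None -> default ResNet backbone
# d = config.lowerCamelCase__()   # deep-copied dict with the backbone serialized via to_dict()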
| 657 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
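# Note: with the `_LazyModule` indirection above, importing this package stays
# cheap; the torch-backed classes listed in `_import_structure` are only
# materialized on first attribute access.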
| 657 | 1 |
import os
def __lowercase ( __lowerCAmelCase : Optional[int] ):
a__ = len(grid[0] )
a__ = len(__lowerCAmelCase )
a__ = 0
a__ = 0
a__ = 0
    # Check vertically, horizontally, and diagonally at the same time (this
    # combined loop only works for an n x n grid)
for i in range(__lowerCAmelCase ):
for j in range(n_rows - 3 ):
a__ = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
a__ = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
a__ = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
a__ = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
a__ = max(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if max_product > largest:
a__ = max_product
return largest
def __lowercase ( ):
a__ = []
with open(os.path.dirname(__lowerCAmelCase ) + '/grid.txt' ) as file:
for line in file:
grid.append(line.strip('\n' ).split(' ' ) )
a__ = [[int(__lowerCAmelCase ) for i in grid[j]] for j in range(len(__lowerCAmelCase ) )]
return largest_product(__lowerCAmelCase )
if __name__ == "__main__":
print(solution())
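# Quick sanity check (a sketch on a hypothetical 4x4 grid):
# largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
# -> 43680, i.e. the bottom row 13 * 14 * 15 * 16.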
| 657 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
        (
            a__ ,
            a__ ,
            a__ ,
            a__ ,
            a__ ,
            a__ ,
            a__ ,
            a__ ,
            a__ ,
        ) = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
| 657 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case : List[str] = logging.get_logger(__name__)
snake_case : Tuple = '''▁'''
snake_case : int = {'''vocab_file''': '''sentencepiece.bpe.model'''}
snake_case : List[str] = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
snake_case : List[str] = {
'''facebook/mbart-large-en-ro''': 10_24,
'''facebook/mbart-large-cc25''': 10_24,
}
# fmt: off
snake_case : Tuple = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Tuple = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self :str ,__snake_case :List[str] ,__snake_case :Tuple="<s>" ,__snake_case :Any="</s>" ,__snake_case :Dict="</s>" ,__snake_case :int="<s>" ,__snake_case :str="<unk>" ,__snake_case :Dict="<pad>" ,__snake_case :int="<mask>" ,__snake_case :Any=None ,__snake_case :List[Any]=None ,__snake_case :List[Any]=None ,__snake_case :Optional[Dict[str, Any]] = None ,__snake_case :Union[str, Any]=None ,**__snake_case :Union[str, Any] ,) -> Optional[int]:
        # The mask token behaves like a normal word, i.e. it includes the space before it
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else mask_token
a__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case ,eos_token=__snake_case ,unk_token=__snake_case ,sep_token=__snake_case ,cls_token=__snake_case ,pad_token=__snake_case ,mask_token=__snake_case ,tokenizer_file=__snake_case ,src_lang=__snake_case ,tgt_lang=__snake_case ,additional_special_tokens=__snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**__snake_case ,)
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__snake_case ) )
a__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
a__ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a__ = 1
a__ = len(self.sp_model )
a__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__snake_case )
}
a__ = {v: k for k, v in self.lang_code_to_id.items()}
a__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
a__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
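        # Worked example of the offset: spm id 3 (",") maps to fairseq id 3 + 1 = 4,
        # and language codes are appended after the spm vocab, e.g. "ar_AR" ->
        # sp_model_size + 0 + fairseq_offset.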
a__ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
a__ = src_lang if src_lang is not None else 'en_XX'
a__ = self.lang_code_to_id[self._src_lang]
a__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self :List[Any] ) -> Union[str, Any]:
a__ = self.__dict__.copy()
a__ = None
a__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self :str ,__snake_case :List[str] ) -> List[str]:
a__ = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
a__ = {}
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowerCamelCase__( self :List[Any] ) -> Optional[int]:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCamelCase__( self :Optional[int] ) -> str:
return self._src_lang
@src_lang.setter
def lowerCamelCase__( self :Optional[int] ,__snake_case :str ) -> None:
a__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase__( self :Tuple ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ,__snake_case :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case ,token_ids_a=__snake_case ,already_has_special_tokens=__snake_case )
a__ = [1] * len(self.prefix_tokens )
a__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones
def lowerCamelCase__( self :int ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__( self :int ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[int]:
a__ = [self.sep_token_id]
a__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :str ,__snake_case :Optional[str] ,__snake_case :Optional[str] ,**__snake_case :List[Any] ) -> int:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
a__ = src_lang
a__ = self(__snake_case ,add_special_tokens=__snake_case ,return_tensors=__snake_case ,**__snake_case )
a__ = self.convert_tokens_to_ids(__snake_case )
a__ = tgt_lang_id
return inputs
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__( self :Any ,__snake_case :str ) -> List[str]:
return self.sp_model.encode(__snake_case ,out_type=__snake_case )
def lowerCamelCase__( self :List[str] ,__snake_case :Dict ) -> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a__ = self.sp_model.PieceToId(__snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Optional[int]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__( self :Dict ,__snake_case :Union[str, Any] ) -> List[Any]:
a__ = ''.join(__snake_case ).replace(__snake_case ,' ' ).strip()
return out_string
def lowerCamelCase__( self :Dict ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ = os.path.join(
__snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case ,'wb' ) as fi:
a__ = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ,__snake_case :str = "en_XX" ,__snake_case :Optional[List[str]] = None ,__snake_case :str = "ro_RO" ,**__snake_case :Tuple ,) -> BatchEncoding:
a__ = src_lang
a__ = tgt_lang
return super().prepare_seqaseq_batch(__snake_case ,__snake_case ,**__snake_case )
def lowerCamelCase__( self :int ) -> Any:
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__( self :Optional[int] ) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ) -> None:
a__ = self.lang_code_to_id[src_lang]
a__ = []
a__ = [self.eos_token_id, self.cur_lang_code]
def lowerCamelCase__( self :Optional[int] ,__snake_case :str ) -> None:
a__ = self.lang_code_to_id[lang]
a__ = []
a__ = [self.eos_token_id, self.cur_lang_code]
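# Usage sketch (hedged: the checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above):
# tok = snake_case_.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Source sequences end with [eos, src_lang_code], per set_src_lang_special_tokens above.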
| 657 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = 1
@register_to_config
def __init__( self :Optional[int] ,__snake_case :int = 10_00 ,__snake_case :Optional[Union[np.ndarray, List[float]]] = None ) -> int:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__snake_case )
# standard deviation of the initial noise distribution
a__ = 1.0
        # For now we only support F-PNDM, i.e. the Runge-Kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
a__ = 4
# running values
a__ = []
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ,__snake_case :Union[str, torch.device] = None ) -> Union[str, Any]:
a__ = num_inference_steps
a__ = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
a__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
a__ = torch.tensor(self.config.trained_betas ,dtype=torch.floataa )
else:
a__ = torch.sin(steps * math.pi / 2 ) ** 2
a__ = (1.0 - self.betas**2) ** 0.5
a__ = (torch.atana(self.betas ,self.alphas ) / math.pi * 2)[:-1]
a__ = timesteps.to(__snake_case )
a__ = []
def lowerCamelCase__( self :Any ,__snake_case :torch.FloatTensor ,__snake_case :int ,__snake_case :torch.FloatTensor ,__snake_case :bool = True ,) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
a__ = (self.timesteps == timestep).nonzero().item()
a__ = timestep_index + 1
a__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__snake_case )
if len(self.ets ) == 1:
a__ = self.ets[-1]
elif len(self.ets ) == 2:
a__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
a__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
a__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
a__ = self._get_prev_sample(__snake_case ,__snake_case ,__snake_case ,__snake_case )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :torch.FloatTensor ,*__snake_case :int ,**__snake_case :Optional[int] ) -> torch.FloatTensor:
return sample
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Any ) -> Optional[Any]:
a__ = self.alphas[timestep_index]
a__ = self.betas[timestep_index]
a__ = self.alphas[prev_timestep_index]
a__ = self.betas[prev_timestep_index]
a__ = (sample - sigma * ets) / max(__snake_case ,1E-8 )
a__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
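# Denoising-loop sketch (hedged: `model` is a hypothetical noise predictor, and
# `step` mirrors the usual SchedulerMixin API since the method definitions
# above are anonymized):
# scheduler.set_timesteps(50)
# for t in scheduler.timesteps:
#     residual = model(sample, t)
#     sample = scheduler.step(residual, t, sample).prev_sample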
| 657 | 1 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_sentencepiece_available():
import sentencepiece as sp
snake_case : Optional[int] = 5
snake_case : Optional[int] = 10
@require_sentencepiece
@require_tokenizers
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = SpeechaTextTokenizer
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Optional[int] = True
def lowerCamelCase__( self :Any ) -> Union[str, Any]:
super().setUp()
a__ = sp.SentencePieceProcessor()
spm_model.Load(__snake_case )
a__ = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__snake_case ) )]
a__ = dict(zip(__snake_case ,range(len(__snake_case ) ) ) )
a__ = Path(self.tmpdirname )
save_json(__snake_case ,save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__snake_case ,save_dir / VOCAB_FILES_NAMES['spm_file'] )
a__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__( self :List[str] ) -> Union[str, Any]:
a__ = '<pad>'
a__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) ,__snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) ,__snake_case )
def lowerCamelCase__( self :List[str] ) -> Union[str, Any]:
a__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'j' )
self.assertEqual(len(__snake_case ) ,10_01 )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size ,10_01 )
def lowerCamelCase__( self :List[str] ) -> List[Any]:
a__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
a__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case ,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) ,[2_89, 50, 14, 1_74, 3_86] ,)
a__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case ,[SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] ,)
a__ = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(__snake_case ,[12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
a__ = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case ,[SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] ,)
@slow
def lowerCamelCase__( self :Any ) -> List[Any]:
# fmt: off
a__ = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case ,model_name='facebook/s2t-small-mustc-en-de-st' ,revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' ,)
@require_sentencepiece
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = '''valhalla/s2t_mustc_multilinguial_medium'''
UpperCAmelCase__ : str = '''C\'est trop cool'''
UpperCAmelCase__ : Optional[Any] = '''Esto es genial'''
@classmethod
def lowerCamelCase__( cls :Any ) -> str:
a__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def lowerCamelCase__( self :Optional[Any] ) -> str:
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] ,4 )
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] ,6 )
self.assertEqual(self.tokenizer.lang_code_to_id['it'] ,9 )
self.assertEqual(self.tokenizer.lang_code_to_id['de'] ,11 )
def lowerCamelCase__( self :Any ) -> str:
self.assertEqual(self.tokenizer.vocab_size ,1_00_00 )
def lowerCamelCase__( self :Any ) -> List[str]:
self.assertIn(__snake_case ,self.tokenizer.all_special_ids )
a__ = [ES_CODE, 4, 16_01, 47, 76_47, 2]
a__ = self.tokenizer.decode(__snake_case ,skip_special_tokens=__snake_case )
a__ = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertNotIn(self.tokenizer.eos_token ,__snake_case )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = 'fr'
a__ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] ,__snake_case )
self.assertEqual(encoded[-1] ,self.tokenizer.eos_token_id )
def lowerCamelCase__( self :Any ) -> Optional[int]:
a__ = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens ,[FR_CODE] )
a__ = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens ,[ES_CODE] )
| 657 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Any = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''MobileViTFeatureExtractor''']
snake_case : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : int | float | str ):
try:
a__ = float(__lowerCAmelCase )
except ValueError:
raise ValueError('Please enter a valid number' )
a__ = decimal - int(__lowerCAmelCase )
if fractional_part == 0:
return int(__lowerCAmelCase ), 1
else:
a__ = len(str(__lowerCAmelCase ).split('.' )[1] )
a__ = int(decimal * (1_0**number_of_frac_digits) )
a__ = 1_0**number_of_frac_digits
a__ , a__ = denominator, numerator
while True:
a__ = dividend % divisor
if remainder == 0:
break
a__ , a__ = divisor, remainder
a__ , a__ = numerator / divisor, denominator / divisor
return int(__lowerCAmelCase ), int(__lowerCAmelCase )
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction("67") = }""")
print(f"""{decimal_to_fraction("45.0") = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction("6.25") = }""")
print(f"""{decimal_to_fraction("78td") = }""")
| 657 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
a__ = TOKENIZER_CLASSES
else:
a__ = {tokenizer_name: getattr(__lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
a__ = TOKENIZER_CLASSES[tokenizer_name]
a__ = True
if checkpoint_name is None:
a__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
a__ = [checkpoint_name]
        logger.info(F'For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
            logger.info(F'Loading {tokenizer_class.__name__} {checkpoint}' )
# Load tokenizer
a__ = tokenizer_class.from_pretrained(__lowerCAmelCase , force_download=__lowerCAmelCase )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
a__ , a__ = checkpoint.split('/' )
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
elif add_prefix:
a__ = checkpoint
a__ = dump_path
else:
a__ = None
a__ = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
a__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
a__ = file_path.split(__lowerCAmelCase )[-1][0]
if next_char == "/":
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
a__ = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
a__ = tokenizer.save_pretrained(
__lowerCAmelCase , legacy_format=__lowerCAmelCase , filename_prefix=__lowerCAmelCase )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(__lowerCAmelCase )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
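    # Example invocation (a sketch: the script filename and argument values are
    # assumptions; the flags are the ones defined above):
    # python convert_tokenizers.py --dump_path ./fast_tokenizers \
    #     --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased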
| 657 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
snake_case : int = '''bart'''
snake_case : Union[str, Any] = True
@st.cache(allow_output_mutation=__lowerCAmelCase )
def __lowercase ( ):
if LOAD_DENSE_INDEX:
a__ = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
a__ = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
a__ = qar_model.eval()
else:
a__ , a__ = (None, None)
if MODEL_TYPE == "bart":
a__ = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
a__ = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
a__ = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
a__ = sas_model.eval()
else:
a__ , a__ = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__lowerCAmelCase )
def __lowercase ( ):
if LOAD_DENSE_INDEX:
a__ = faiss.StandardGpuResources()
a__ = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
a__ = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 1_2_8) , )
a__ = faiss.IndexFlatIP(1_2_8 )
a__ = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase )
wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU
else:
a__ , a__ = (None, None)
a__ = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__lowerCAmelCase )
def __lowercase ( ):
a__ = datasets.load_dataset('eli5' , name='LFQA_reddit' )
a__ = elia['train_eli5']
a__ = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 1_2_8) )
a__ = faiss.IndexFlatIP(1_2_8 )
eli5_train_q_index.add(__lowerCAmelCase )
return (elia_train, eli5_train_q_index)
snake_case , snake_case , snake_case : Optional[int] = load_indexes()
snake_case , snake_case , snake_case , snake_case : Any = load_models()
snake_case , snake_case : Optional[int] = load_train_data()
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int=1_0 ):
a__ = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase )
a__ , a__ = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase )
a__ = [elia_train[int(__lowerCAmelCase )] for i in I[0]]
return nn_examples
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]="wiki40b" , __lowerCAmelCase : List[str]="dense" , __lowerCAmelCase : str=1_0 ):
if source == "none":
a__ , a__ = (' <P> '.join(['' for _ in range(1_1 )] ).strip(), [])
else:
if method == "dense":
a__ , a__ = query_qa_dense_index(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
a__ , a__ = query_es_index(
__lowerCAmelCase , __lowerCAmelCase , index_name='english_wiki40b_snippets_100w' , n_results=__lowerCAmelCase , )
a__ = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
a__ = 'question: {} context: {}'.format(__lowerCAmelCase , __lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None),
} )
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=6_4 , __lowerCAmelCase : List[str]=2_5_6 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : int=0.95 , __lowerCAmelCase : Optional[Any]=0.8 ):
with torch.no_grad():
a__ = qa_sas_generate(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1_0_2_4 , device='cuda:0' , )[0]
return (answer, support_list)
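# End-to-end flow sketch: make_support() retrieves passages and builds a
# "question: ... context: ..." string; answer_question() then runs the BART
# seq2seq model over it with beam search or sampling, per the sidebar options below.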
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
snake_case : Optional[Any] = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
snake_case : str = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
snake_case : List[str] = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
snake_case : List[Any] = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
snake_case : List[Any] = st.sidebar.checkbox('''Demo options''')
if demo_options:
snake_case : str = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
snake_case : Optional[Any] = action_list.index(action_st)
snake_case : Optional[Any] = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
snake_case : List[Any] = show_type == '''Show full text of passages'''
else:
snake_case : Dict = 3
snake_case : str = True
snake_case : List[Any] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
snake_case : List[str] = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved documents as input.
'''
st.sidebar.markdown(retriever_info)
snake_case : Union[str, Any] = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
snake_case : Tuple = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
snake_case : Dict = '''wiki40b'''
snake_case : List[str] = '''dense'''
snake_case : Optional[int] = '''beam'''
snake_case : List[Any] = 2
snake_case : Tuple = 64
snake_case : List[Any] = 2_56
snake_case : List[Any] = None
snake_case : str = None
snake_case : Union[str, Any] = st.sidebar.checkbox('''Generation options''')
if generate_options:
snake_case : Union[str, Any] = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
snake_case : Any = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
snake_case : Optional[int] = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
snake_case : Tuple = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
snake_case : Tuple = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
snake_case : Any = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
snake_case : Optional[int] = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
snake_case : List[Any] = None
# start main text
snake_case : Tuple = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
snake_case : Dict = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
snake_case : Tuple = st.text_input('''Enter your question here:''', '''''')
else:
snake_case : Optional[int] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
snake_case , snake_case : int = make_support(question, source=wiki_source, method='''dense''', n_results=10)
snake_case , snake_case : Optional[int] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
snake_case : Optional[int] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
snake_case : Optional[int] = support_list[:10]
snake_case : Dict = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
snake_case , snake_case : Dict = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
snake_case , snake_case : Dict = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
snake_case : str = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
snake_case : Union[str, Any] = res[1].strip()
if sec_titles == "":
snake_case : List[Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
snake_case : str = sec_titles.split(''' & ''')
snake_case : Any = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
snake_case : Union[str, Any] = find_nearest_training(question)
snake_case : Union[str, Any] = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
snake_case : Tuple = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
snake_case : Dict = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 657 |
from math import ceil, sqrt
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
a__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
a__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
a__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
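# Sanity check from the problem statement: up to one hundred tiles allow
# forty-one different square laminae, i.e. solution(100) == 41.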
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case : Optional[Any] = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[Any] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
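
# Usage note (illustrative, not part of the original module): `_LazyModule` defers
# the heavy torch-backed imports above until a name is first accessed, e.g.
#
#   from transformers import MraConfig   # cheap, configuration only
#   from transformers import MraModel    # triggers the torch-backed import lazily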
| 657 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_audio_spectrogram_transformer'''] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_audio_spectrogram_transformer'''] = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
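
# Note (illustrative): the model classes above are gated on `is_torch_available()`,
# while `ASTFeatureExtractor` is gated separately on `is_speech_available()`
# (torchaudio), since the extractor computes its mel filter banks with torchaudio.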
| 657 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            # decoupled weight decay: apply it to everything except biases and LayerNorm weights
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                # fairscale's OSS shards optimizer state across data-parallel workers
                self.optimizer = OSS(params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
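

# `label_smoothed_nll_loss` above is imported from a local `utils` module that is
# not shown here. The sketch below is an assumption about what such a helper
# typically computes (the classic fairseq-style smoothing), not its verbatim source.
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    """Blend per-token NLL with a uniform distribution over the vocabulary."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)   # log-prob of the gold token
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)   # uniform-smoothing component
    # note: `ignore_index` must be a valid vocab id for the gather above;
    # the trainer passes `config.pad_token_id`, which satisfies this.
    pad_mask = target.eq(ignore_index)
    nll_loss.masked_fill_(pad_mask, 0.0)
    smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss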
| 657 | 1 |