from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
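# Note on the batching test above: XGLM is decoder-only, so generation continues
# from the last prompt position. With right padding, pad tokens would sit between
# the prompt and the first generated token; left padding keeps each prompt flush
# against the generation, which is why the batched outputs can match the
# per-sentence runs.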
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_KWARGS_DESCRIPTION = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
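# Note: JSON object keys are always strings, so the downloaded id2label file maps
# e.g. "0" -> "N/A", "1" -> "person"; the int(k) cast above restores integer class
# ids before the mapping is attached to the config.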
def create_rename_keys(config):
    rename_keys = []

    # stem
    # fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original model's weights to our DETA structure.
    """

    # load config
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic, so seed the generator for a reproducible waveform
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # same check as above, but passing the input as a keyword argument
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance, replacing any `GenerationConfig` entry (which is not
        JSON-serializable) with its own dict representation.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    # dummy stand-in so the module imports even without PIL installed
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count the number of set bits in an integer.
    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        # n & (n - 1) clears the lowest set bit, so the loop runs once per set bit
        number &= number - 1
        result += 1
    return result
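# Why `number &= number - 1` counts set bits: subtracting 1 flips the lowest set
# bit and every bit below it, so the AND clears exactly that bit. For 25 (0b11001)
# the loop sees 0b11001 -> 0b11000 -> 0b10000 -> 0b00000, i.e. three iterations.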
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count the number of set bits in an integer.
    >>> get_set_bits_count_using_modulo_operator(25)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    """
    Benchmark code comparing the two functions, with different int values.
    """

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """
    Checks if a matrix is Hermitian (equal to its own conjugate transpose).
    """
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """
    Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
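# The Rayleigh quotient R(a, v) = (v* a v) / (v* v); for a Hermitian matrix it is
# always real and is bounded by the smallest and largest eigenvalues of `a`.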
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])

    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty lists for both story and summary."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
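    # The _LazyModule assignment defers the heavy torch import until an attribute
    # such as TrOCRForCausalLM is actually accessed on the package.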
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = M2M100Tokenizer.from_pretrained(cls.checkpoint_name, src_lang="en", tgt_lang="fr")
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2

        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
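# Illustrative usage sketch (added for clarity; not part of the original test
# file — the checkpoint and tokenizer APIs are real, the snippet itself is an
# example):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model_inputs = tokenizer("In my opinion, ...", return_tensors="pt")
#   # at generation time, the target language is forced as the first decoder
#   # token, e.g. forced_bos_token_id=tokenizer.get_lang_id("fr")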
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
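# Minimal sketch of how `get_tfds` is meant to be called (illustrative only;
# the CSV paths below are hypothetical placeholders):
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-cased")
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv", eval_file="dev.csv", test_file=None,
#       tokenizer=tok, label_column_id=0, max_seq_length=128,
#   )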
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
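# Example invocation (illustrative; the script name and data files are
# placeholders, the flags come from the dataclasses above and TFTrainingArguments):
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#       --output_dir ./model_out --do_train --do_eval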
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume `image` already carries the inputs (dict, list of dicts, generator, ...)
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
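# Usage sketch (illustrative; "visual-question-answering" is the real pipeline
# task alias that resolves to this class):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering")
#   vqa(image="path/to/photo.png", question="What is on the table?", top_k=3)
#   # -> [{"score": ..., "answer": ...}, ...]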
def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
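# Example behaviour (illustrative):
#   >>> split("apple#banana#cherry#orange", separator="#")
#   ['apple', 'banana', 'cherry', 'orange']
# Note that, unlike str.split, a trailing separator does not produce a
# trailing empty string with this implementation.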
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
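# Worked example (illustrative): for the dimension array [30, 35, 15, 5, 10, 20, 25]
# the DP recurrence
#   matrix[a][b] = min over c in [a, b) of
#       matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# yields matrix[1][n - 1] == 15125, the classic textbook optimum for this chain.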
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
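# Shape sketch for `collate_fn` (illustrative): for a batch of B rows whose
# "sentence" tensors have lengths L_i, it returns
#   text_tensor, mask_tensor: (B, max_i L_i) long tensors, right-padded with 0
#   img_tensor:               (B, 3, 224, 224) after `get_image_transforms`
#   tgt_tensor:               (B, n_classes) multi-hot label matrix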
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
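# Examples of patterns the check above treats as "attribute used" (illustrative):
#   config.hidden_size                        -> matched by the substring check
#   getattr(config, "hidden_size", 768)       -> matched by the substring check
#   getattr(
#       self.config, "hidden_size"
#   )                                         -> matched by the multi-line regex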
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the json configuration
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
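# Example invocation (illustrative; the paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin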
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
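# Note (assumption, added for clarity): FlaxModelTesterMixin drives the actual
# checks — it builds `model_class(**init_dict)` and runs it on
# inputs_dict["sample"] with the given PRNG key, so the keys in `init_dict`
# must match FlaxAutoencoderKL's constructor arguments.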
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
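# Note on the memory assertions above (added for clarity):
# torch.cuda.max_memory_allocated() reports the peak since the last
# torch.cuda.reset_peak_memory_stats()/reset_max_memory_allocated() call,
# which is why the offloading test resets the counters before running.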
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''')
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Dict , _snake_case : Union[str, Any]):
"""simple docstring"""
_start_torch_memory_measurement()
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , num_inference_steps=2 , generator=_snake_case , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , original_image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : int , _snake_case : Dict):
"""simple docstring"""
_start_torch_memory_measurement()
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_snake_case)
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , mask_image=_snake_case , num_inference_steps=2 , generator=_snake_case , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_snake_case)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , mask_image=_snake_case , original_image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
def _start_torch_memory_measurement() -> None:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
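# Usage sketch (illustrative, not part of the original test file): reset the
# CUDA peak-memory counters with the helper above before a pipeline run, then
# read the peak back afterwards, as the `_test_if*` methods above do.
#
#     _start_torch_memory_measurement()
#     pipe(prompt_embeds=..., num_inference_steps=2, output_type="np")
#     peak_bytes = torch.cuda.max_memory_allocated()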
| 362 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order so that iteration yields ascending values.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists by re-sorting the concatenation."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
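    # Added usage sketch (illustrative, not in the original module):
    # merge_lists re-sorts the concatenation, so duplicate values are preserved.
    demo = merge_lists(SSL([3, 1]), SSL([2, 0]))
    assert str(demo) == "0 -> 1 -> 2 -> 3"
    assert len(demo) == 4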
| 7 | 0 |
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError('''String lengths must match!''')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
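    # Added worked examples (illustrative): "karolin" and "kathrin" differ at
    # 3 positions; two fully different 4-bit strings differ at all 4.
    assert hamming_distance("karolin", "kathrin") == 3
    assert hamming_distance("0000", "1111") == 4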
| 363 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __snake_case :
def __init__( self : int , _snake_case : List[Any] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = question_encoder
UpperCAmelCase_ = generator
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int]):
"""simple docstring"""
if os.path.isfile(_snake_case):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
os.makedirs(_snake_case , exist_ok=_snake_case)
UpperCAmelCase_ = os.path.join(_snake_case , '''question_encoder_tokenizer''')
UpperCAmelCase_ = os.path.join(_snake_case , '''generator_tokenizer''')
self.question_encoder.save_pretrained(_snake_case)
self.generator.save_pretrained(_snake_case)
@classmethod
def lowerCamelCase ( cls : Optional[Any] , _snake_case : Optional[Any] , **_snake_case : Optional[int]):
"""simple docstring"""
from ..auto.tokenization_auto import AutoTokenizer
UpperCAmelCase_ = kwargs.pop('''config''' , _snake_case)
if config is None:
UpperCAmelCase_ = RagConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''')
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.generator , subfolder='''generator_tokenizer''')
return cls(question_encoder=_snake_case , generator=_snake_case)
def __call__( self : List[Any] , *_snake_case : List[str] , **_snake_case : List[Any]):
"""simple docstring"""
return self.current_tokenizer(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[Any] , *_snake_case : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return self.generator.batch_decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : str , *_snake_case : Optional[int] , **_snake_case : Any):
"""simple docstring"""
return self.generator.decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.generator
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[List[str]] = None , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : str = "longest" , _snake_case : str = None , _snake_case : bool = True , **_snake_case : Optional[int] , ):
"""simple docstring"""
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , _snake_case , )
if max_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , max_length=_snake_case , padding=_snake_case , truncation=_snake_case , **_snake_case , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
text_target=_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , **_snake_case , )
UpperCAmelCase_ = labels['''input_ids''']
return model_inputs
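# Usage sketch for the composite tokenizer above (a RagTokenizer in the
# upstream library). Illustrative only: the checkpoint name is an assumption,
# loading it downloads weights, and `generated_ids` stands in for the output
# of a RAG model's generate() call.
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer(["who wrote hamlet?"], return_tensors="pt")
#     answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)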
| 7 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def A () -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=__a , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__a , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def A (__A : Any ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def A (__A : List[Any] ) -> Dict:
"""simple docstring"""
def remove_articles(__A : Union[str, Any] ):
return ARTICLES_REGEX.sub(''' ''' , __a )
def white_space_fix(__A : Dict ):
return " ".join(text.split() )
def remove_punc(__A : List[str] ):
UpperCAmelCase_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__A : Any ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) )
def A (__A : Tuple ) -> Optional[int]:
"""simple docstring"""
if not s:
return []
return normalize_answer(__a ).split()
def A (__A : Optional[Any] , __A : str ) -> List[str]:
"""simple docstring"""
return int(normalize_answer(__a ) == normalize_answer(__a ) )
def A (__A : Dict , __A : int ) -> str:
"""simple docstring"""
UpperCAmelCase_ = get_tokens(__a )
UpperCAmelCase_ = get_tokens(__a )
UpperCAmelCase_ = collections.Counter(__a ) & collections.Counter(__a )
UpperCAmelCase_ = sum(common.values() )
if len(__a ) == 0 or len(__a ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
UpperCAmelCase_ = 1.0 * num_same / len(__a )
UpperCAmelCase_ = 1.0 * num_same / len(__a )
UpperCAmelCase_ = (2 * precision * recall) / (precision + recall)
return fa
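# Worked example for the token-level F1 above (illustrative):
# gold = "the cat sat", pred = "the cat" -> 2 common tokens,
# precision = 2/2 = 1.0, recall = 2/3, F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.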
def A (__A : List[Any] , __A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = qa['''id''']
UpperCAmelCase_ = [t for t in qa['''answers''']['''text'''] if normalize_answer(__a )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
UpperCAmelCase_ = ['''''']
if qid not in preds:
print(F"""Missing prediction for {qid}""" )
continue
UpperCAmelCase_ = preds[qid]
# Take max over all gold answers
UpperCAmelCase_ = max(compute_exact(__a , __a ) for a in gold_answers )
UpperCAmelCase_ = max(compute_fa(__a , __a ) for a in gold_answers )
return exact_scores, fa_scores
def A (__A : Any , __A : List[str] , __A : Dict , __A : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = {}
for qid, s in scores.items():
UpperCAmelCase_ = na_probs[qid] > na_prob_thresh
if pred_na:
UpperCAmelCase_ = float(not qid_to_has_ans[qid] )
else:
UpperCAmelCase_ = s
return new_scores
def A (__A : Optional[int] , __A : Optional[int] , __A : Tuple=None ) -> List[str]:
"""simple docstring"""
if not qid_list:
UpperCAmelCase_ = len(__a )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
UpperCAmelCase_ = len(__a )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def A (__A : Dict , __A : Union[str, Any] , __A : Tuple ) -> Any:
"""simple docstring"""
for k in new_eval:
UpperCAmelCase_ = new_eval[k]
def A (__A : List[str] , __A : Dict , __A : int , __A : Optional[int] ) -> str:
"""simple docstring"""
plt.step(__a , __a , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(__a , __a , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__a )
plt.savefig(__a )
plt.clf()
def A (__A : Optional[int] , __A : List[Any] , __A : int , __A : Union[str, Any] , __A : Any=None , __A : str=None ) -> str:
"""simple docstring"""
UpperCAmelCase_ = sorted(__a , key=lambda __A : na_probs[k] )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 1.0
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = [1.0]
UpperCAmelCase_ = [0.0]
UpperCAmelCase_ = 0.0
for i, qid in enumerate(__a ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
UpperCAmelCase_ = true_pos / float(i + 1 )
UpperCAmelCase_ = true_pos / float(__a )
if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__a )
recalls.append(__a )
if out_image:
plot_pr_curve(__a , __a , __a , __a )
return {"ap": 100.0 * avg_prec}
def A (__A : List[Any] , __A : Any , __A : Dict , __A : Dict , __A : Optional[int] , __A : Union[str, Any] ) -> Dict:
"""simple docstring"""
if out_image_dir and not os.path.exists(__a ):
os.makedirs(__a )
UpperCAmelCase_ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
UpperCAmelCase_ = make_precision_recall_eval(
__a , __a , __a , __a , out_image=os.path.join(__a , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
UpperCAmelCase_ = make_precision_recall_eval(
__a , __a , __a , __a , out_image=os.path.join(__a , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
UpperCAmelCase_ = {k: float(__a ) for k, v in qid_to_has_ans.items()}
UpperCAmelCase_ = make_precision_recall_eval(
__a , __a , __a , __a , out_image=os.path.join(__a , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(__a , __a , '''pr_exact''' )
merge_eval(__a , __a , '''pr_f1''' )
merge_eval(__a , __a , '''pr_oracle''' )
def A (__A : str , __A : Optional[int] , __A : Optional[Any] , __A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if not qid_list:
return
UpperCAmelCase_ = [na_probs[k] for k in qid_list]
UpperCAmelCase_ = np.ones_like(__a ) / float(len(__a ) )
plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(F"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(__a , F"""na_prob_hist_{name}.png""" ) )
plt.clf()
def A (__A : Optional[Any] , __A : str , __A : Union[str, Any] , __A : Optional[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
UpperCAmelCase_ = num_no_ans
UpperCAmelCase_ = cur_score
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = sorted(__a , key=lambda __A : na_probs[k] )
for i, qid in enumerate(__a ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
UpperCAmelCase_ = scores[qid]
else:
if preds[qid]:
UpperCAmelCase_ = -1
else:
UpperCAmelCase_ = 0
cur_score += diff
if cur_score > best_score:
UpperCAmelCase_ = cur_score
UpperCAmelCase_ = na_probs[qid]
return 100.0 * best_score / len(__a ), best_thresh
def A (__A : Union[str, Any] , __A : str , __A : List[str] , __A : str , __A : Optional[int] , __A : List[str] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = find_best_thresh(__a , __a , __a , __a )
UpperCAmelCase_ = find_best_thresh(__a , __a , __a , __a )
UpperCAmelCase_ = best_exact
UpperCAmelCase_ = exact_thresh
UpperCAmelCase_ = best_fa
UpperCAmelCase_ = fa_thresh
def A () -> Optional[Any]:
"""simple docstring"""
with open(OPTS.data_file ) as f:
UpperCAmelCase_ = json.load(__a )
UpperCAmelCase_ = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
UpperCAmelCase_ = json.load(__a )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
UpperCAmelCase_ = json.load(__a )
else:
UpperCAmelCase_ = {k: 0.0 for k in preds}
UpperCAmelCase_ = make_qid_to_has_ans(__a ) # maps qid to True/False
UpperCAmelCase_ = [k for k, v in qid_to_has_ans.items() if v]
UpperCAmelCase_ = [k for k, v in qid_to_has_ans.items() if not v]
UpperCAmelCase_ = get_raw_scores(__a , __a )
UpperCAmelCase_ = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh )
UpperCAmelCase_ = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh )
UpperCAmelCase_ = make_eval_dict(__a , __a )
if has_ans_qids:
UpperCAmelCase_ = make_eval_dict(__a , __a , qid_list=__a )
merge_eval(__a , __a , '''HasAns''' )
if no_ans_qids:
UpperCAmelCase_ = make_eval_dict(__a , __a , qid_list=__a )
merge_eval(__a , __a , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(__a , __a , __a , __a , __a , __a )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir )
histogram_na_prob(__a , __a , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(__a , __a , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(__a , __a )
else:
print(json.dumps(__a , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
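    # Example invocation (illustrative; the script and file names are
    # placeholders, the flags match the argparse definitions above):
    #   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
    #       --na-prob-file na_probs.json -o eval.json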
| 364 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
@slow
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
| 7 | 0 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def A (__A : list[int] , __A : list[int] , __A : int ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = [0] * no_of_processes
UpperCAmelCase_ = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = burst_time[i]
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 999999999
UpperCAmelCase_ = 0
UpperCAmelCase_ = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(lowerCAmelCase__ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
UpperCAmelCase_ = remaining_time[j]
UpperCAmelCase_ = j
UpperCAmelCase_ = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
UpperCAmelCase_ = remaining_time[short]
if minm == 0:
UpperCAmelCase_ = 999999999
if remaining_time[short] == 0:
complete += 1
UpperCAmelCase_ = False
# Find finish time of current process
UpperCAmelCase_ = increment_time + 1
# Calculate waiting time
UpperCAmelCase_ = finish_time - arrival_time[short]
UpperCAmelCase_ = finar - burst_time[short]
if waiting_time[short] < 0:
UpperCAmelCase_ = 0
# Increment time
increment_time += 1
return waiting_time
def A (__A : list[int] , __A : int , __A : list[int] ) -> str:
"""simple docstring"""
UpperCAmelCase_ = [0] * no_of_processes
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = burst_time[i] + waiting_time[i]
return turn_around_time
def A (__A : list[int] , __A : list[int] , __A : int ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = total_waiting_time + waiting_time[i]
UpperCAmelCase_ = total_turn_around_time + turn_around_time[i]
print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print('''Average turn around time =''' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("Enter how many process you want to analyze")
snake_case_ : List[Any] = int(input())
snake_case_ : Union[str, Any] = [0] * no_of_processes
snake_case_ : Optional[Any] = [0] * no_of_processes
snake_case_ : List[Any] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("Enter the arrival time and burst time for process:--" + str(i + 1))
snake_case_ , snake_case_ : Tuple = map(int, input().split())
snake_case_ : Tuple = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
snake_case_ : Tuple = burst_time
snake_case_ : Tuple = no_of_processes
snake_case_ : Any = waiting_time
snake_case_ : List[Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
snake_case_ : Any = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
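    # Worked example (illustrative) for the preemptive SJF above:
    # arrival = [0, 1, 2], burst = [4, 2, 1] gives waiting times [3, 0, 1]:
    # P2 preempts P1 at t=1 and finishes at t=3, P3 runs t=3..4, and P1
    # resumes to finish at t=7, so the average waiting time is 4/3.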
| 365 |
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Return -1 if `number` has an odd count of prime factors, else 1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError('''Input must be a positive integer''')
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
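    # Added spot checks (illustrative) for the helper above:
    assert mobius(6) == 1    # 6 = 2 * 3, an even number of prime factors
    assert mobius(30) == -1  # 30 = 2 * 3 * 5, an odd number
    # Note: as written, non-squarefree inputs such as 4 also map to +/-1,
    # whereas the mathematical Moebius value there is 0.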
| 7 | 0 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set the given objects to None and empty the accelerator cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True if `exception` looks like an out-of-memory error."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Rerun `function`, halving the injected batch size on every OOM error."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size
    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`")
        while True:
            if batch_size == 0:
                raise RuntimeError('''No executable batch size found, reached zero.''')
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
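# Usage sketch for the decorator above (illustrative; `train` and its
# arguments are hypothetical). The decorated function receives the batch
# size as its first argument, injected and halved on every OOM failure:
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size, model, dataset):
#         ...
#
#     train(model, dataset)  # note: the caller does NOT pass batch_size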
| 366 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
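        # Minimal standalone sketch (illustrative) of the benchmark API the
        # tests above exercise:
        #   args = PyTorchBenchmarkArguments(models=["sshleifer/tiny-gpt2"], inference=True,
        #                                    sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        #   results = PyTorchBenchmark(args).run()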
| 7 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self : Optional[int] , _snake_case : Any , _snake_case : Union[str, Any]=7 , _snake_case : List[Any]=3 , _snake_case : List[Any]=30 , _snake_case : Optional[Any]=400 , _snake_case : int=True , _snake_case : Optional[int]=None , _snake_case : Optional[int]=0.9 , _snake_case : Union[str, Any]=None , _snake_case : Union[str, Any]=True , _snake_case : int=[0.5, 0.5, 0.5] , _snake_case : Union[str, Any]=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 30}
UpperCAmelCase_ = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize_and_center_crop
UpperCAmelCase_ = size
UpperCAmelCase_ = crop_pct
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __snake_case ( _UpperCamelCase , unittest.TestCase ):
UpperCAmelCase__ : Tuple = PoolFormerImageProcessor if is_vision_available() else None
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = PoolFormerImageProcessingTester(self)
@property
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''do_resize_and_center_crop'''))
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''size'''))
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''crop_pct'''))
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''do_normalize'''))
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''image_mean'''))
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''image_std'''))
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 30})
self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30})
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {'''shortest_edge''': 42})
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})
def lowerCamelCase ( self : Dict):
"""simple docstring"""
pass
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 367 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A (__A : BertModel , __A : str , __A : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
UpperCAmelCase_ = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(__A ):
os.makedirs(__A )
UpperCAmelCase_ = model.state_dict()
def to_tf_var_name(__A : str ):
for patt, repl in iter(__A ):
UpperCAmelCase_ = name.replace(__A , __A )
return F"""bert/{name}"""
def create_tf_var(__A : np.ndarray , __A : str , __A : tf.Session ):
UpperCAmelCase_ = tf.dtypes.as_dtype(tensor.dtype )
UpperCAmelCase_ = tf.get_variable(dtype=__A , shape=tensor.shape , name=__A , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__A )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCAmelCase_ = to_tf_var_name(__A )
UpperCAmelCase_ = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCAmelCase_ = torch_tensor.T
UpperCAmelCase_ = create_tf_var(tensor=__A , name=__A , session=__A )
tf.keras.backend.set_value(__A , __A )
UpperCAmelCase_ = session.run(__A )
print(F"""Successfully created {tf_name}: {np.allclose(__A , __A )}""" )
UpperCAmelCase_ = tf.train.Saver(tf.trainable_variables() )
saver.save(__A , os.path.join(__A , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def A (__A : Any=None ) -> str:
"""simple docstring"""
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=__A , required=__A , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=__A , default=__A , required=__A , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=__A , required=__A , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=__A , required=__A , help='''Directory in which to save tensorflow model''' )
UpperCAmelCase_ = parser.parse_args(__A )
UpperCAmelCase_ = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
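    # Example invocation (illustrative; the script and paths are placeholders,
    # the flags match the argparse definitions above):
    #   python convert_bert_pytorch_checkpoint_to_tf.py \
    #       --model_name bert-base-uncased \
    #       --pytorch_model_path ./pytorch_model.bin \
    #       --tf_cache_dir ./tf_ckpt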
| 7 | 0 |
def prefix_function(input_string: str) -> list[int]:
    """Knuth-Morris-Pratt prefix function: prefix_result[i] is the length of
    the longest proper prefix of input_string[: i + 1] that is also its suffix."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_string: str) -> int:
    """Length of the longest border found across all prefixes of the string."""
    return max(prefix_function(input_string))
if __name__ == "__main__":
import doctest
doctest.testmod()
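    # Added worked example (illustrative): for "abcab" the prefix function is
    # [0, 0, 0, 1, 2], so the longest border across all prefixes has length 2.
    assert prefix_function("abcab") == [0, 0, 0, 1, 2]
    assert longest_prefix("abcab") == 2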
| 368 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict=3 , _snake_case : Dict=32 , _snake_case : List[str]=3 , _snake_case : Union[str, Any]=10 , _snake_case : Tuple=[10, 20, 30, 40] , _snake_case : Dict=[1, 1, 2, 1] , _snake_case : List[Any]=True , _snake_case : Dict=True , _snake_case : Union[str, Any]="relu" , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = self.get_config()
return config, pixel_values
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModel(config=_snake_case)
UpperCAmelCase_ = model(_snake_case)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = FlaxRegNetForImageClassification(config=_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : int = False
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case)
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
pass
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
def check_hidden_states_output(_snake_case : List[str] , _snake_case : Dict , _snake_case : List[str]):
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_stages
self.assertEqual(len(_snake_case) , expected_num_stages + 1)
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case)
UpperCAmelCase_ = model_class(_snake_case)
@jax.jit
def model_jitted(_snake_case : str , **_snake_case : Union[str, Any]):
return model(pixel_values=_snake_case , **_snake_case)
with self.subTest('''JIT Enabled'''):
UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple()
self.assertEqual(len(_snake_case) , len(_snake_case))
for jitted_output, output in zip(_snake_case , _snake_case):
self.assertEqual(jitted_output.shape , output.shape)
def A () -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : Dict):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None
@slow
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_snake_case , return_tensors='''np''')
UpperCAmelCase_ = model(**_snake_case)
# verify the logits
UpperCAmelCase_ = (1, 1000)
self.assertEqual(outputs.logits.shape , _snake_case)
UpperCAmelCase_ = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
| 7 | 0 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def A (__A : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def A (__A : int , __A : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    """
    Helper that returns the (new name, original name) pair for the cls token of
    stage `idx` (only the final stage of CvT uses one).
    """
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def final():
    """
    Helper that returns (new name, original name) pairs for the final layer norm
    and classification head.
    """
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """
    Convert a Microsoft CvT checkpoint to the Hugging Face format.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Path to the original CvT checkpoint (.pth file) to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
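# Example invocation (hypothetical local paths; the script file name follows the
# usual transformers conversion-script convention and is an assumption):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-hf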
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that year's competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n    >>> comet_metric = datasets.load_metric('comet')\n    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n    >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n    >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n    >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n    >>> print([round(v, 2) for v in results[\"scores\"]])\n    [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
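# Example invocation (hypothetical paths; the script file name assumes the usual
# transformers conversion-script convention):
#   python convert_m2m100_original_checkpoint_to_pytorch.py /path/to/m2m100.pt ./m2m100-hf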
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # only build a default scheduler when none was supplied
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
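    # full_loop drives a complete 10-step sampling run with the dummy model; the
    # integration-style tests below compare the mean absolute value of the final
    # sample against hard-coded reference numbers.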
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
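# XLMModelTester above follows the standard Hugging Face "model tester" pattern:
# it builds a tiny random XLMConfig plus matching dummy tensors so that every
# model head can be exercised quickly on CPU by the test case below.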
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["DeiTFeatureExtractor"]
snake_case_ : List[str] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
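# With the _LazyModule registration above, `from transformers.models.deit import DeiTModel`
# resolves the heavy torch/TF submodules only on first attribute access instead of at import time.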
def heaps(arr: list) -> list:
    """
    Pure python implementation of the recursive Heap's algorithm,
    returning all permutations of a list.

    >>> heaps([1, 2, 3])
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
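# Heap's algorithm produces each of the n! permutations with a single swap per
# step, so the generator above runs in O(n!) time with O(n) extra space
# (ignoring the output list itself).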
if __name__ == "__main__":
snake_case_ : Any = input("Enter numbers separated by a comma:\n").strip()
snake_case_ : Optional[Any] = [int(item) for item in user_input.split(",")]
print(heaps(arr))
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_KWARGS_DESCRIPTION = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n    'num_edits' (int): The cumulative number of edits\n    'ref_length' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = [\"does this sentence match??\",\n        ...     \"what about this sentence?\",\n        ...     \"What did the TER metric user say to the developer?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...     [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n        ...     [\"Your jokes are...\", \"...TERrible\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     case_sensitive=True)\n        >>> print(results)\n        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n    Example 2:\n        >>> predictions = [\"does this sentence match??\",\n        ...     \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...     [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     case_sensitive=True)\n        >>> print(results)\n        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n    Example 3:\n        >>> predictions = [\"does this sentence match??\",\n        ...     \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...     [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     normalized=True,\n        ...     case_sensitive=True)\n        >>> print(results)\n        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n    Example 4:\n        >>> predictions = [\"does this sentence match??\",\n        ...     \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...     [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     ignore_punct=True,\n        ...     case_sensitive=False)\n        >>> print(results)\n        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n    Example 5:\n        >>> predictions = [\"does this sentence match??\",\n        ...     \"what about this sentence?\",\n        ...     \"What did the TER metric user say to the developer?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...     [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n        ...     [\"Your jokes are...\", \"...TERrible\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     ignore_punct=True,\n        ...     case_sensitive=False)\n        >>> print(results)\n        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Bottom-up dynamic programming solution for matching `input_string` against
    `pattern`, where "." matches any single character and "*" matches zero or
    more of the preceding character. The pattern may not start with "*".
    Runtime: O(len(input_string) * len(pattern)).
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
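# Sanity check (illustrative): for patterns restricted to literals, "." and "*",
# the DP table agrees with Python's own regex engine, e.g.:
#   import re
#   assert match_pattern("aab", "c*a*b") == bool(re.fullmatch("c*a*b", "aab"))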
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
snake_case_ : Any = """aab"""
snake_case_ : str = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"{input_string} matches the given pattern {pattern}")
else:
print(f"{input_string} does not match with the given pattern {pattern}")
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)  # the underlying TTS model is stochastic, so fix the seed
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)  # the underlying TTS model is stochastic, so fix the seed
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
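# --- Added example -------------------------------------------------------
# A rough sketch of the dynamic-axes logic from the ONNX config above as a
# plain function, handy for eyeballing the two cases; the function name is
# mine, not part of the transformers API (OrderedDict is already imported
# at the top of this file).
def build_dynamic_axes(task: str) -> OrderedDict:
    if task == "multiple-choice":
        # multiple-choice inputs carry an extra "choice" axis
        axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axis), ("attention_mask", axis)])

assert list(build_dynamic_axes("default")) == ["input_ids", "attention_mask"]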
| 352 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 7 | 0 |
"""simple docstring"""
def A (__A : int ) -> bool:
"""simple docstring"""
if number < 0:
raise ValueError('''number must not be negative''' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
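# --- Added note ----------------------------------------------------------
# "number & (number - 1)" clears the lowest set bit, so the expression above
# is zero exactly when at most one bit is set -- which means 0 also passes
# the check. A strict variant that excludes 0 (my addition, not part of the
# original file):
def is_strict_power_of_two(number: int) -> bool:
    """
    >>> [n for n in range(10) if is_strict_power_of_two(n)]
    [1, 2, 4, 8]
    """
    return number > 0 and number & (number - 1) == 0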
| 353 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __snake_case :
@staticmethod
def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1)
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
] , )
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
@slow
@require_torch
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''')
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
| 7 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __snake_case :
UpperCAmelCase__ : int
UpperCAmelCase__ : int
class __snake_case :
def __init__( self : Dict , _snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = [[] for _ in range(SCREAMING_SNAKE_CASE_)]
UpperCAmelCase_ = size
def __getitem__( self : Tuple , _snake_case : Dict):
"""simple docstring"""
return iter(self._graph[vertex])
@property
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
return self._size
def lowerCamelCase ( self : Tuple , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Tuple):
"""simple docstring"""
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''')
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''')
self._graph[from_vertex].append(Edge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_))
def lowerCamelCase ( self : List[Any] , _snake_case : Tuple , _snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = deque([start_vertex])
UpperCAmelCase_ = [None] * self.size
UpperCAmelCase_ = 0
while queue:
UpperCAmelCase_ = queue.popleft()
UpperCAmelCase_ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
UpperCAmelCase_ = current_distance + edge.weight
UpperCAmelCase_ = distances[edge.destination_vertex]
if (
isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
and new_distance >= dest_vertex_distance
):
continue
UpperCAmelCase_ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex)
else:
queue.append(edge.destination_vertex)
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''')
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
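# --- Added example -------------------------------------------------------
# A compact, standalone 0-1 BFS over a plain adjacency list, equivalent in
# spirit to the class above: 0-weight edges go to the front of the deque and
# 1-weight edges to the back, so vertices leave the deque in distance order.
# Names and the tiny test graph are illustrative additions.
def zero_one_bfs(adj: list, start: int) -> list:
    dist = [float("inf")] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:  # w must be 0 or 1
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                if w == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    return dist

# vertices 0..2 with edges 0 -(1)-> 1, 1 -(0)-> 2, 0 -(1)-> 2
assert zero_one_bfs([[(1, 1), (2, 1)], [(2, 0)], []], 0) == [0, 1, 1]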
| 354 |
from timeit import timeit
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
number &= number - 1
result += 1
return result
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def A () -> None:
"""simple docstring"""
def do_benchmark(__A : int ) -> None:
UpperCAmelCase_ = '''import __main__ as z'''
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__A ) = }""" )
UpperCAmelCase_ = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=__A )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__A ) = }""" )
UpperCAmelCase_ = timeit(
'''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=__A , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
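# --- Added note ----------------------------------------------------------
# For reference, CPython 3.10+ ships a built-in popcount, int.bit_count(),
# which is usually far faster than either pure-Python loop above. The
# version gate below is my own guard, not part of the original benchmark.
import sys

if sys.version_info >= (3, 10):
    assert (25).bit_count() == 3  # 25 == 0b11001 -> three set bits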
| 7 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
snake_case_ : Union[str, Any] = (720, 1280) # Height, Width
snake_case_ : str = (0.4, 0.6) # if a box's height or width is below this scale, drop it.
snake_case_ : Optional[Any] = 1 / 100
snake_case_ : int = ''''''
snake_case_ : str = ''''''
snake_case_ : List[str] = ''''''
snake_case_ : List[str] = 250
def A () -> None:
"""simple docstring"""
UpperCAmelCase_ = get_dataset(lowercase__ , lowercase__ )
for index in range(lowercase__ ):
UpperCAmelCase_ = random.sample(range(len(lowercase__ ) ) , 4 )
UpperCAmelCase_ = update_image_and_anno(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , filter_scale=lowercase__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCAmelCase_ = random_chars(32 )
UpperCAmelCase_ = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
UpperCAmelCase_ = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , lowercase__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
UpperCAmelCase_ = []
for anno in new_annos:
UpperCAmelCase_ = anno[3] - anno[1]
UpperCAmelCase_ = anno[4] - anno[2]
UpperCAmelCase_ = anno[1] + width / 2
UpperCAmelCase_ = anno[2] + height / 2
UpperCAmelCase_ = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(lowercase__ )
with open(F"""{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def A (__A : str , __A : str ) -> tuple[list, list]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for label_file in glob.glob(os.path.join(lowercase__ , '''*.txt''' ) ):
UpperCAmelCase_ = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(lowercase__ ) as in_file:
UpperCAmelCase_ = in_file.readlines()
UpperCAmelCase_ = os.path.join(lowercase__ , F"""{label_name}.jpg""" )
UpperCAmelCase_ = []
for obj_list in obj_lists:
UpperCAmelCase_ = obj_list.rstrip('''\n''' ).split(''' ''' )
UpperCAmelCase_ = float(obj[1] ) - float(obj[3] ) / 2
UpperCAmelCase_ = float(obj[2] ) - float(obj[4] ) / 2
UpperCAmelCase_ = float(obj[1] ) + float(obj[3] ) / 2
UpperCAmelCase_ = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(lowercase__ )
labels.append(lowercase__ )
return img_paths, labels
def A (__A : list , __A : list , __A : list[int] , __A : tuple[int, int] , __A : tuple[float, float] , __A : float = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
UpperCAmelCase_ = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
UpperCAmelCase_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
UpperCAmelCase_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
UpperCAmelCase_ = int(scale_x * output_size[1] )
UpperCAmelCase_ = int(scale_y * output_size[0] )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for i, index in enumerate(lowercase__ ):
UpperCAmelCase_ = all_img_list[index]
path_list.append(lowercase__ )
UpperCAmelCase_ = all_annos[index]
UpperCAmelCase_ = cva.imread(lowercase__ )
if i == 0: # top-left
UpperCAmelCase_ = cva.resize(lowercase__ , (divid_point_x, divid_point_y) )
UpperCAmelCase_ = img
for bbox in img_annos:
UpperCAmelCase_ = bbox[1] * scale_x
UpperCAmelCase_ = bbox[2] * scale_y
UpperCAmelCase_ = bbox[3] * scale_x
UpperCAmelCase_ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
UpperCAmelCase_ = cva.resize(lowercase__ , (output_size[1] - divid_point_x, divid_point_y) )
UpperCAmelCase_ = img
for bbox in img_annos:
UpperCAmelCase_ = scale_x + bbox[1] * (1 - scale_x)
UpperCAmelCase_ = bbox[2] * scale_y
UpperCAmelCase_ = scale_x + bbox[3] * (1 - scale_x)
UpperCAmelCase_ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
UpperCAmelCase_ = cva.resize(lowercase__ , (divid_point_x, output_size[0] - divid_point_y) )
UpperCAmelCase_ = img
for bbox in img_annos:
UpperCAmelCase_ = bbox[1] * scale_x
UpperCAmelCase_ = scale_y + bbox[2] * (1 - scale_y)
UpperCAmelCase_ = bbox[3] * scale_x
UpperCAmelCase_ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
UpperCAmelCase_ = cva.resize(
lowercase__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
UpperCAmelCase_ = img
for bbox in img_annos:
UpperCAmelCase_ = scale_x + bbox[1] * (1 - scale_x)
UpperCAmelCase_ = scale_y + bbox[2] * (1 - scale_y)
UpperCAmelCase_ = scale_x + bbox[3] * (1 - scale_x)
UpperCAmelCase_ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
UpperCAmelCase_ = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def A (__A : int ) -> str:
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
UpperCAmelCase_ = ascii_lowercase + digits
return "".join(random.choice(lowercase__ ) for _ in range(lowercase__ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
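# --- Added example -------------------------------------------------------
# The annotation write-out in main() converts corner boxes back to YOLO's
# normalized center format; the same math as a tiny standalone helper (the
# name and doctest are mine):
def corners_to_yolo(xmin: float, ymin: float, xmax: float, ymax: float) -> tuple:
    """
    >>> corners_to_yolo(0.0, 0.0, 0.5, 0.25)
    (0.25, 0.125, 0.5, 0.25)
    """
    width, height = xmax - xmin, ymax - ymin
    return xmin + width / 2, ymin + height / 2, width, height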
| 355 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = 10
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_snake_case , _snake_case)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy())
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy())
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case)
np.testing.assert_array_equal(_snake_case , _snake_case)
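# --- Added example -------------------------------------------------------
# The three truncate_or_pad tests above fully pin down its behavior; a
# one-line implementation consistent with them (a sketch, not the module
# under test):
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    return sequence[:block_size] + [pad_token_id] * (block_size - len(sequence))

assert truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad_sketch(list(range(1, 14)), 10, 0) == list(range(1, 11))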
| 7 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
snake_case_ : Dict = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
snake_case_ : Tuple = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def A (__A : str ) -> str:
"""simple docstring"""
if "://" in dataset_path:
UpperCAmelCase_ = dataset_path.split('''://''' )[1]
return dataset_path
def A (__A : fsspec.AbstractFileSystem ) -> int:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def A (__A : fsspec.AbstractFileSystem , __A : str , __A : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = not is_remote_filesystem(A__ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(A__ ) , fs._strip_protocol(A__ ) )
else:
fs.mv(A__ , A__ , recursive=A__ )
def A () -> Dict:
"""simple docstring"""
if hasattr(fsspec.asyn , '''reset_lock''' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = threading.Lock()
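# --- Added example -------------------------------------------------------
# The URI-stripping helper above, restated with an explicit name and a
# couple of made-up paths as a sanity check:
def extract_path_sketch(dataset_path: str) -> str:
    return dataset_path.split("://")[1] if "://" in dataset_path else dataset_path

assert extract_path_sketch("s3://bucket/train") == "bucket/train"
assert extract_path_sketch("/local/train") == "/local/train"  # passes through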
| 356 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
snake_case_ : Optional[Any] = 128022
snake_case_ : Optional[int] = 128028
@require_sentencepiece
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : List[str] = MaMaaaTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = True
def lowerCamelCase ( self : str):
"""simple docstring"""
super().setUp()
UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case))))
UpperCAmelCase_ = Path(self.tmpdirname)
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file'''])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file'''])
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = '''</s>'''
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''</s>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''<s>''')
self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case)
self.assertEqual(_snake_case , '''This is a test''')
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : Dict = '''facebook/m2m100_418M'''
UpperCAmelCase__ : Dict = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
UpperCAmelCase__ : Dict = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''')
UpperCAmelCase_ = 1
return cls
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.get_vocab()
self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size)
self.assertEqual(vocab['''<unk>'''] , 3)
self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
self.assertIn(_snake_case , self.tokenizer.all_special_ids)
# fmt: off
UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case)
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case)
self.assertEqual(_snake_case , _snake_case)
self.assertNotIn(self.tokenizer.eos_token , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case)
self.assertDictEqual(new_tok.lang_token_to_id , _snake_case)
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = '''fr'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''')
UpperCAmelCase_ = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
for k in batch:
UpperCAmelCase_ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
UpperCAmelCase_ = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
UpperCAmelCase_ = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''')
self.assertEqual(
nested_simplify(_snake_case) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
| 7 | 0 |
def A (__A : Any ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = [0] * len(__A )
UpperCAmelCase_ = []
UpperCAmelCase_ = [1] * len(__A )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__A ) ):
if indegree[i] == 0:
queue.append(__A )
while queue:
UpperCAmelCase_ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCAmelCase_ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__A )
print(max(__A ) )
# Adjacency list of Graph
snake_case_ : Optional[Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
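# --- Added example -------------------------------------------------------
# The same Kahn's-algorithm longest path, returning the value instead of
# printing it; names are mine, and every vertex is assumed to appear as a
# key of the adjacency dict (true for the graph above).
from collections import deque

def longest_distance_value(graph: dict) -> int:
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    dist = {v: 1 for v in graph}  # path length counted in vertices
    queue = deque(v for v, d in indegree.items() if d == 0)
    while queue:
        u = queue.popleft()
        for v in graph[u]:
            dist[v] = max(dist[v], dist[u] + 1)
            indegree[v] -= 1
            if indegree[v] == 0:
                queue.append(v)
    return max(dist.values())

assert longest_distance_value(
    {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
) == 5  # longest chain: 0 -> 2 -> 5 -> 6 -> 7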
| 357 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a )
class __snake_case ( a ):
def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
self.check_model_type(_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = {}, {}
if padding is not None:
UpperCAmelCase_ = padding
if truncation is not None:
UpperCAmelCase_ = truncation
if top_k is not None:
UpperCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str):
"""simple docstring"""
if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = {'''image''': image, '''question''': question}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case)
return results
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False):
"""simple docstring"""
UpperCAmelCase_ = load_image(inputs['''image'''])
UpperCAmelCase_ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case)
UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework)
model_inputs.update(_snake_case)
return model_inputs
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model(**_snake_case)
return model_outputs
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case)
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
UpperCAmelCase_ = scores.tolist()
UpperCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
| 7 | 0 |
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ : str = ['torch', 'scipy']
def __init__( self : List[str] , *_snake_case : Optional[int] , **_snake_case : int):
"""simple docstring"""
requires_backends(self , ['''torch''', '''scipy'''])
@classmethod
def lowerCamelCase ( cls : Dict , *_snake_case : Tuple , **_snake_case : Dict):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''])
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *_snake_case : List[Any] , **_snake_case : Any):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''])
| 358 |
import sys
def A (__A : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = len(__A )
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
for chain_length in range(2 , __A ):
for a in range(1 , n - chain_length + 1 ):
UpperCAmelCase_ = a + chain_length - 1
UpperCAmelCase_ = sys.maxsize
for c in range(__A , __A ):
UpperCAmelCase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase_ = cost
UpperCAmelCase_ = c
return matrix, sol
def A (__A : Any , __A : Dict , __A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if i == j:
print('''A''' + str(__A ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
        print_optimal_solution(__A , __A , optimal_solution[i][j] )
        print_optimal_solution(__A , optimal_solution[i][j] + 1 , __A )
print(''')''' , end=''' ''' )
def A () -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = [30, 35, 15, 5, 10, 20, 25]
UpperCAmelCase_ = len(__A )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCAmelCase_ , UpperCAmelCase_ = matrix_chain_order(__A )
    print('''No. of Operations required: ''' + str(matrix[1][n - 1] ) )
    print_optimal_solution(__A , 1 , n - 1 )
if __name__ == "__main__":
main()
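# --- Added example -------------------------------------------------------
# The same recurrence, top-down with memoization; dims[i - 1] * dims[k] *
# dims[j] is the cost of the final multiplication when splitting at k. The
# function name is mine; the expected value matches the classic
# [30, 35, 15, 5, 10, 20, 25] instance used in main() above.
from functools import lru_cache

def matrix_chain_min_ops(dims: list) -> int:
    @lru_cache(maxsize=None)
    def best(i: int, j: int) -> int:
        if i == j:
            return 0  # a single matrix needs no multiplication
        return min(
            best(i, k) + best(k + 1, j) + dims[i - 1] * dims[k] * dims[j]
            for k in range(i, j)
        )

    return best(1, len(dims) - 1)

assert matrix_chain_min_ops([30, 35, 15, 5, 10, 20, 25]) == 15125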
| 7 | 0 |
"""simple docstring"""
from PIL import Image
def A (__A : Image ) -> Image:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = image.size
UpperCAmelCase_ = 0
UpperCAmelCase_ = image.load()
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
UpperCAmelCase_ = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
snake_case_ : Tuple = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
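# --- Added example -------------------------------------------------------
# The same mean-threshold idea on a plain 2D list of grayscale values, with
# no PIL dependency (the list-of-lists representation is my simplification):
def mean_threshold_sketch(gray: list) -> list:
    """
    >>> mean_threshold_sketch([[10, 200], [30, 220]])
    [[0, 255], [0, 255]]
    """
    mean = sum(sum(row) for row in gray) // sum(len(row) for row in gray)
    return [[255 if px > mean else 0 for px in row] for row in gray]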
| 359 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
    # not used in modeling files, but it's important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
    # used during training (even though we don't have a training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , )
is not None
):
UpperCAmelCase_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(__A )
UpperCAmelCase_ = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A )
and issubclass(__A , __A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(__A )
if len(__A ) > 0:
UpperCAmelCase_ = unused_attributes
if len(__A ) > 0:
UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
| 7 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Dict = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __snake_case ( a ):
UpperCAmelCase__ : Any = '''vit_mae'''
def __init__( self : Any , _snake_case : List[str]=768 , _snake_case : Optional[int]=12 , _snake_case : str=12 , _snake_case : str=3072 , _snake_case : str="gelu" , _snake_case : List[Any]=0.0 , _snake_case : Any=0.0 , _snake_case : Optional[Any]=0.0_2 , _snake_case : List[str]=1e-12 , _snake_case : str=224 , _snake_case : Dict=16 , _snake_case : Dict=3 , _snake_case : List[str]=True , _snake_case : Tuple=16 , _snake_case : Tuple=512 , _snake_case : Union[str, Any]=8 , _snake_case : Dict=2048 , _snake_case : List[Any]=0.7_5 , _snake_case : Union[str, Any]=False , **_snake_case : Tuple , ):
"""simple docstring"""
super().__init__(**_snake_case)
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = decoder_num_attention_heads
UpperCAmelCase_ = decoder_hidden_size
UpperCAmelCase_ = decoder_num_hidden_layers
UpperCAmelCase_ = decoder_intermediate_size
UpperCAmelCase_ = mask_ratio
UpperCAmelCase_ = norm_pix_loss
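# --- Added note ----------------------------------------------------------
# Quick sanity check on the defaults above: image_size=224 with
# patch_size=16 yields (224 // 16) ** 2 = 196 patches, and mask_ratio=0.75
# leaves 49 of them visible to the encoder. (The arithmetic is mine, not
# code from the original file.)
assert (224 // 16) ** 2 == 196
assert round(196 * (1 - 0.75)) == 49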
| 360 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = FlaxAutoencoderKL
@property
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = 4
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = jax.random.PRNGKey(0)
UpperCAmelCase_ = jax.random.uniform(_snake_case , ((batch_size, num_channels) + sizes))
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
| 7 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
snake_case_ : Dict = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any = ["BeitFeatureExtractor"]
snake_case_ : Union[str, Any] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
snake_case_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case_ : List[str] = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class __snake_case ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = TOKEN
HfFolder.save_token(_snake_case)
@classmethod
def lowerCamelCase ( cls : List[str]):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''')
except HTTPError:
pass
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
config.push_to_hub('''test-config''' , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_snake_case , repo_id='''test-config''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_snake_case , repo_id='''valid_org/test-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
CustomConfig.register_for_auto_class()
UpperCAmelCase_ = CustomConfig(attribute=42)
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''})
UpperCAmelCase_ = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case)
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''')
self.assertEqual(new_config.attribute , 42)
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        c = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + '''foo'''  # str
        c.update_from_string(
            F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""")
        self.assertEqual(n_embd , c.n_embd , '''mismatch for key: n_embd''')
        self.assertEqual(resid_pdrop , c.resid_pdrop , '''mismatch for key: resid_pdrop''')
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''')
        self.assertEqual(summary_type , c.summary_type , '''mismatch for key: summary_type''')
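    # A rough sketch of the parsing `update_from_string` is assumed to perform
    # (for illustration only; the real implementation lives in PretrainedConfig):
    #
    #     for kv in update_str.split(","):
    #         key, value = kv.split("=")
    #         old = getattr(config, key)
    #         if isinstance(old, bool):
    #             setattr(config, key, value.lower() in ("true", "1", "y", "yes"))
    #         else:
    #             setattr(config, key, type(old)(value))  # cast to the existing attribute's type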
def lowerCamelCase ( self : Dict):
"""simple docstring"""
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                '''The following keys are set with the default values in'''
                ''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
                F""" {", ".join(keys_with_defaults)}.""")
def lowerCamelCase ( self : str):
"""simple docstring"""
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''')
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''')
        self.assertIsNotNone(config)
def lowerCamelCase ( self : Any):
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
            # This checks that we did call the fake head request
            mock_head.assert_called()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''')
def lowerCamelCase ( self : str):
"""simple docstring"""
        configuration = AutoConfig.from_pretrained('''bert-base-cased''')
        configuration.configuration_files = ['''config.4.0.0.json''']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , '''config.4.0.0.json''') , '''w'''))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size , 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 42.0.0
            configuration.configuration_files = ['''config.42.0.0.json''']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir , '''config.4.0.0.json''') , os.path.join(tmp_dir , '''config.42.0.0.json'''))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size , 768)
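    # Sketch of the selection rule the two checks above exercise (an assumed
    # simplification of the real lookup): among `config.X.Y.Z.json` candidates,
    # pick the highest version that is still <= the installed transformers
    # version, falling back to the plain `config.json` otherwise.
    #
    #     from packaging import version
    #     def pick_config_file(candidates, current_version):
    #         best = "config.json"
    #         for name in sorted(candidates, key=lambda n: version.parse(n[7:-5])):
    #             if version.parse(name[7:-5]) <= version.parse(current_version):
    #                 best = name
    #         return best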
def lowerCamelCase ( self : Dict):
"""simple docstring"""
        repo = '''hf-internal-testing/test-two-configs'''
        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = '''v4.0.0'''
        new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size , 2)
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {})
        # Testing an older version by monkey-patching the version in the module where it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = '''v3.0.0'''
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size , 768)
| 7 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def get_dummy_inputs( self : Dict , seed : List[str]=0):
        """simple docstring"""
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.7_5,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7])
assert np.abs(image_slice - expected_slice).max() < 1e-1
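    # The remaining checks repeat this recipe: swap in a scheduler via
    # `Scheduler.from_config(pipe.scheduler.config)`, rerun the same dummy inputs,
    # and compare a fixed 3x3 corner slice of the output against reference values.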
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def lowerCamelCase ( self : Any):
"""simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
@property
    def gpu_provider( self : Union[str, Any]):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self : Optional[Any]):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''')
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None)
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt , image=init_image , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''')
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''')
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None)
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt , image=init_image , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 362 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__( self , ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints , reverse=True):
            self.head = Node(i , self.head)

    def __iter__( self ) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__( self ) -> int:
        return sum(1 for _ in self)

    def __str__( self ) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList , sll_two: SortedLinkedList) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
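# Note: merge_lists concatenates both value sequences and lets SortedLinkedList
# re-sort them, so merging costs O((m + n) log(m + n)); a linear-time merge
# would have to splice the two already-sorted node chains directly.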
| 7 | 0 |
ROMAN = [
(1000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def roman_to_int(roman: str) -> int:
    """simple docstring"""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    """simple docstring"""
    result = []
    for arabic, roman in ROMAN:
        factor , number = divmod(number , arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __snake_case :
    def __init__( self : int , question_encoder : List[Any] , generator : Tuple):
        """simple docstring"""
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def lowerCamelCase ( self : Union[str, Any] , save_directory : str):
        """simple docstring"""
        if os.path.isfile(save_directory):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
        os.makedirs(save_directory , exist_ok=True)
        question_encoder_path = os.path.join(save_directory , '''question_encoder_tokenizer''')
        generator_path = os.path.join(save_directory , '''generator_tokenizer''')
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
@classmethod
    def lowerCamelCase ( cls : Optional[Any] , pretrained_model_name_or_path : Optional[Any] , **kwargs : Optional[int]):
        """simple docstring"""
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop('''config''' , None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='''question_encoder_tokenizer''')
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='''generator_tokenizer''')
        return cls(question_encoder=question_encoder , generator=generator)
    def __call__( self : List[Any] , *args : List[str] , **kwargs : List[Any]):
        """simple docstring"""
        return self.current_tokenizer(*args , **kwargs)
    def lowerCamelCase ( self : List[Any] , *args : str , **kwargs : Union[str, Any]):
        """simple docstring"""
        return self.generator.batch_decode(*args , **kwargs)
    def lowerCamelCase ( self : str , *args : Optional[int] , **kwargs : Any):
        """simple docstring"""
        return self.generator.decode(*args , **kwargs)
    def lowerCamelCase ( self : List[str]):
        """simple docstring"""
        self.current_tokenizer = self.question_encoder
    def lowerCamelCase ( self : Tuple):
        """simple docstring"""
        self.current_tokenizer = self.generator
    def lowerCamelCase ( self : Optional[Any] , src_texts : List[str] , tgt_texts : Optional[List[str]] = None , max_length : Optional[int] = None , max_target_length : Optional[int] = None , padding : str = "longest" , return_tensors : str = None , truncation : bool = True , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['''labels'''] = labels['''input_ids''']
        return model_inputs
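# Sketch of the non-deprecated replacement for the method above (tokenizer
# instance and texts are illustrative):
#
#     model_inputs = rag_tokenizer(src_texts, padding="longest", return_tensors="pt")
#     labels = rag_tokenizer(text_target=tgt_texts, padding="longest", return_tensors="pt")
#     model_inputs["labels"] = labels["input_ids"]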
| 7 | 0 |
def min_path_sum(grid: list) -> int:
    """simple docstring"""
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row: list , row_above: list) -> list:
    """simple docstring"""
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
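    # Illustrative check (example grid chosen for this note): the cheapest
    # monotone path through this grid costs 1 + 3 + 1 + 1 + 1 = 7.
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7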
| 364 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3))
@slow
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3))
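# Both integration checks above compare only the last feature dimension at every
# position against reference values exported from fairseq (see the commented-out
# export snippet kept next to each expected tensor).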
| 7 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __snake_case ( PretrainedConfig ):
    model_type = """donut-swin"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self : Any , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.0_2 , layer_norm_eps=1e-5 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
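# Illustrative derived value (assuming the defaults above): with embed_dim=96 and
# four stages, the channel dimension after the last stage is 96 * 2 ** 3 == 768,
# which is exactly what `hidden_size` is set to at the end of __init__.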
| 365 |
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """simple docstring"""
    if not isinstance(number , int):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(number) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
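    # Illustrative values (chosen for this note): 10 = 2 * 5 has an even number of
    # prime factors while 12 = 2 * 2 * 3 has an odd number, hence the signs below.
    assert liouville_lambda(10) == 1
    assert liouville_lambda(12) == -1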
| 7 | 0 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
snake_case_ : Optional[int] = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
snake_case_ : Tuple = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
snake_case_ : Optional[int] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc='''dummy_doc''') -> List[str]:
    """simple docstring"""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
    if not keep_singletons:
        logger.info(
            F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            '''files, respectively''' )
    return doc_coref_infos
def evaluate(key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span) -> List[str]:
    """simple docstring"""
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": f1} )
        logger.info(
            name.ljust(10 ) , F"""Recall: {recall * 100:.2f}""" , F""" Precision: {precision * 100:.2f}""" , F""" F1: {f1 * 100:.2f}""" , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(F"""CoNLL score: {conll:.2f}""" )
        output_scores.update({'''conll_score''': conll} )
    return output_scores
def check_gold_parse_annotation(key_lines) -> bool:
    """simple docstring"""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''')),
'''references''': datasets.Sequence(datasets.Value('''string''')),
}) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
    def lowerCamelCase ( self : Union[str, Any] , predictions : Any , references : Optional[int] , keep_singletons : Dict=True , NP_only : List[str]=False , min_span : int=False , remove_nested : int=False):
        """simple docstring"""
        metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
        # util.parse_key_file(key_file)
        # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 366 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
    def check_results_dict_not_empty( self : Optional[int] , results : Union[str, Any]):
        """simple docstring"""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
                result = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
| 7 | 0 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list , b: list) -> list:
    """simple docstring"""
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception('''Matrices are not 2x2''' )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list , matrix_b: list):
    """simple docstring"""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def matrix_subtraction(matrix_a: list , matrix_b: list):
    """simple docstring"""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """simple docstring"""
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''' )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """simple docstring"""
    return len(matrix ), len(matrix[0] )
def print_matrix(matrix: list) -> None:
    """simple docstring"""
    print('''\n'''.join(str(line ) for line in matrix ) )
def actual_strassen(matrix_a: list , matrix_b: list) -> list:
    """simple docstring"""
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )
    a , b , c , d = split_matrix(matrix_a )
    e , f , g , h = split_matrix(matrix_b )
    t1 = actual_strassen(a , matrix_subtraction(f , h ) )
    t2 = actual_strassen(matrix_addition(a , b ) , h )
    t3 = actual_strassen(matrix_addition(c , d ) , e )
    t4 = actual_strassen(d , matrix_subtraction(g , e ) )
    t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
def strassen(matrix1: list , matrix2: list) -> list:
    """simple docstring"""
    if matrix_dimensions(matrix1 )[1] != matrix_dimensions(matrix2 )[0]:
        msg = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            F"""Matrix A: {matrix1}\n"""
            F"""Matrix B: {matrix2}"""
        )
        raise Exception(msg )
    dimension1 = matrix_dimensions(matrix1 )
    dimension2 = matrix_dimensions(matrix2 )
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1 , *dimension2 )
    maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension1[1] , maxim ):
                new_matrix1[i].append(0 )
        else:
            new_matrix1.append([0] * maxim )
        if i < dimension2[0]:
            for _ in range(dimension2[1] , maxim ):
                new_matrix2[i].append(0 )
        else:
            new_matrix2.append([0] * maxim )
    final_matrix = actual_strassen(new_matrix1 , new_matrix2 )
    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension2[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
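    # Illustrative cross-check, not part of the original script: it assumes numpy
    # is installed and uses fresh copies because strassen() pads its inputs in place.
    import copy

    import numpy as np

    small_a = [[2, 3, 4, 5], [6, 4, 3, 1]]
    small_b = [[0, 2], [16, 2], [2, 2], [13, 11]]
    assert (
        np.array(strassen(copy.deepcopy(small_a), copy.deepcopy(small_b)))
        == np.array(small_a) @ np.array(small_b)
    ).all()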
| 367 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel , ckpt_dir: str , model_name: str):
    """simple docstring"""
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    var_map = (
        ('''layer.''', '''layer_'''),
        ('''word_embeddings.weight''', '''word_embeddings'''),
        ('''position_embeddings.weight''', '''position_embeddings'''),
        ('''token_type_embeddings.weight''', '''token_type_embeddings'''),
        ('''.''', '''/'''),
        ('''LayerNorm/weight''', '''LayerNorm/gamma'''),
        ('''LayerNorm/bias''', '''LayerNorm/beta'''),
        ('''weight''', '''kernel'''),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name: str ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return F"""bert/{name}"""
    def create_tf_var(tensor: np.ndarray , name: str , session: tf.Session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(F"""Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}""" )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def main(raw_args=None):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=str , required=True , help='''model name e.g. bert-base-uncased''' )
    parser.add_argument(
        '''--cache_dir''' , type=str , default=None , required=False , help='''Directory containing pytorch model''' )
    parser.add_argument('''--pytorch_model_path''' , type=str , required=True , help='''/path/to/<pytorch-model-name>.bin''' )
    parser.add_argument('''--tf_cache_dir''' , type=str , required=True , help='''Directory in which to save tensorflow model''' )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
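# Example invocation (script name and paths are hypothetical):
#
#     python convert_bert_pytorch_checkpoint_to_original_tf.py \
#         --model_name bert-base-uncased \
#         --pytorch_model_path ./pytorch_model.bin \
#         --tf_cache_dir ./tf_ckpt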
| 7 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class __snake_case ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self : Optional[Any] , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def lowerCamelCase ( self : Union[str, Any] , token_ids_0 : Any , token_ids_1 : Optional[int]=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def lowerCamelCase ( self : int , token_ids_0 : Any , token_ids_1 : Tuple = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
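# Illustrative special-token layout produced by the two helpers above (token ids
# are made up): a single sequence becomes [bos] ids_0 [eos]; a pair becomes
# [bos] ids_0 [eos] [eos] ids_1 [eos], with all-zero token type ids in both cases.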
| 368 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config ( self ):
        """simple docstring"""
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model ( self , config , pixel_values):
        """simple docstring"""
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification ( self , config , pixel_values):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class __snake_case ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp ( self ):
        """simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False)
    def test_config ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties ( self ):
        """simple docstring"""
        return
    def test_model ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
    def test_inputs_embeds ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
    def test_model_common_attributes ( self ):
"""simple docstring"""
pass
    def test_forward_signature ( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_hidden_states_output ( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) , expected_num_stages + 1)
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
    def test_jit_compilation ( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
                model = model_class(config)
                @jax.jit
                def model_jitted(pixel_values , **kwargs):
                    return model(pixel_values=pixel_values , **kwargs)
                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs) , len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
def prepare_img ():
"""simple docstring"""
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class __snake_case ( unittest.TestCase ):
@cached_property
    def default_image_processor ( self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None
    @slow
    def test_inference_image_classification_head ( self ):
        """simple docstring"""
        model = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''')
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
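# Standalone inference sketch mirroring the slow test above (hedged: requires
# Hub access plus the flax and vision extras; not part of the original tests).
def _regnet_demo():
    image = prepare_img()
    processor = AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''')
    model = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
    inputs = processor(images=image , return_tensors='''np''')
    logits = model(**inputs).logits
    return int(jnp.argmax(logits , axis=-1)[0])  # predicted ImageNet class index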
| 7 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _readaa (bytestream ):
    """simple docstring"""
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('''>''' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_images (f ):
    """simple docstring"""
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2051:
            raise ValueError(
                '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
        num_images = _readaa(bytestream )
        rows = _readaa(bytestream )
        cols = _readaa(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(None , '''Please use tf.one_hot on tensors.''' )
def _dense_to_one_hot (labels_dense , num_classes ):
    """simple docstring"""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
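# Sanity sketch for the dense -> one-hot conversion above (pure numpy, no TF
# session required; added here for illustration only).
def _demo_dense_to_one_hot():
    one_hot = _dense_to_one_hot(numpy.array([0, 2]) , 3)
    assert (one_hot == numpy.array([[1, 0, 0], [0, 0, 1]])).all()
    return one_hot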
@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_labels (f , one_hot=False , num_classes=10 ):
    """simple docstring"""
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2049:
            raise ValueError(
                '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
        num_items = _readaa(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet :
    @deprecated(
        None , '''Please use alternatives such as official/mnist/_DataSet.py'''
        ''' from tensorflow/models.''' , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        """simple docstring"""
        seeda , seedb = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seedb)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F"""images.shape: {images.shape} labels.shape: {labels.shape}"""
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images , 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images ( self ):
        """simple docstring"""
        return self._images
    @property
    def labels ( self ):
        """simple docstring"""
        return self._labels
    @property
    def num_examples ( self ):
        """simple docstring"""
        return self._num_examples
    @property
    def epochs_completed ( self ):
        """simple docstring"""
        return self._epochs_completed
    def next_batch ( self , batch_size , fake_data=False , shuffle=True):
        """simple docstring"""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perma = numpy.arange(self._num_examples)
            numpy.random.shuffle(perma)
            self._images = self.images[perma]
            self._labels = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
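# Usage sketch for the _DataSet batching above; random stand-in data replaces a
# real MNIST download, so it runs offline (illustration only).
def _demo_next_batch():
    rng = numpy.random.RandomState(0)
    images = rng.rand(10 , 4 , 4 , 1).astype(numpy.float32)
    labels = numpy.arange(10)
    ds = _DataSet(images , labels , reshape=True , seed=0)
    batch_images , batch_labels = ds.next_batch(4)
    return batch_images.shape , batch_labels.shape  # ((4, 16), (4,))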
@deprecated(None , '''Please write your own downloading logic.''' )
def _maybe_download (filename , work_directory , source_url ):
    """simple docstring"""
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print('''Successfully downloaded''' , filename , size , '''bytes.''' )
    return filepath
@deprecated(
    None , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def read_data_sets (train_dir , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , validation_size=5000 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
    """simple docstring"""
    if fake_data:
        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            "Validation size should be between 0 and "
            F"""{len(train_images )}. Received: {validation_size}."""
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )
    return _Datasets(train=train , validation=validation , test=test )
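# Typical top-level usage sketch (hedged: downloads the four MNIST gzip files,
# roughly 11MB, on first call; the /tmp path is a placeholder).
def _demo_read_data_sets(data_dir='''/tmp/mnist_data'''):
    datasets_ = read_data_sets(data_dir , one_hot=True)
    return datasets_.train.num_examples  # 55000 with the default validation split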
| 369 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
snake_case_ : str = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
snake_case_ : Tuple = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
snake_case_ : Optional[int] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    def _info ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence'''),
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare ( self , dl_manager):
        """simple docstring"""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da'''))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute ( self , sources , predictions , references , gpus=None , progress_bar=False):
        """simple docstring"""
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        data = [dict(zip(data , t)) for t in zip(*data.values())]
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
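# Minimal end-to-end sketch mirroring the docstring example above (hedged: the
# first call downloads a large COMET checkpoint from the Hub).
def _demo_comet():
    metric = datasets.load_metric('''comet''')
    results = metric.compute(
        sources=['''Dem Feuer konnte Einhalt geboten werden'''] ,
        predictions=['''The fire could be stopped'''] ,
        references=['''They were able to control the fire.'''] , )
    return results['''scores''']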
| 7 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config ( self ):
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids)
        result = model(input_ids , token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
        return inputs_dict
    def setUp ( self ):
        """simple docstring"""
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37)
    def test_config ( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_various_embeddings ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained ( self ):
        """simple docstring"""
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding ( self ):
        """simple docstring"""
        model = AlbertModel.from_pretrained('''albert-base-v2''')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4))
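# A quick standalone sketch of the same checkpoint outside the test harness
# (hedged: requires Hub access; shapes follow the integration test above).
def _demo_albert_hidden_states():
    model = AlbertModel.from_pretrained('''albert-base-v2''')
    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
    with torch.no_grad():
        hidden = model(input_ids)[0]
    return hidden.shape  # torch.Size([1, 11, 768])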
| 370 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __snake_case ( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 25),)
    def get_scheduler_config ( self , **kwargs):
        """simple docstring"""
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
        config.update(**kwargs)
        return config
    def check_over_configs ( self , time_step=0 , **config):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(output , t , sample , **kwargs).prev_sample
                new_output = new_scheduler.step(new_output , t , sample , **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained ( self ):
        """simple docstring"""
        pass
    def check_over_forward ( self , time_step=0 , **forward_kwargs):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop ( self , scheduler=None , **kwargs):
        """simple docstring"""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample , t)
            sample = scheduler.step(residual , t , sample).prev_sample
        return sample
    def test_full_uneven_loop ( self ):
        """simple docstring"""
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample , t)
            sample = scheduler.step(residual , t , sample).prev_sample
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_5_7_4) < 1e-3
    def test_timesteps ( self ):
        """simple docstring"""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_switch ( self ):
        """simple docstring"""
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
    def test_thresholding ( self ):
        """simple docstring"""
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='''dpmsolver++''' , solver_order=order , solver_type=solver_type , )
    def test_prediction_type ( self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type ( self ):
        """simple docstring"""
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final ( self ):
        """simple docstring"""
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_lambda_min_clipped ( self ):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
    def test_variance_type ( self ):
        """simple docstring"""
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type='''learned_range''')
    def test_inference_steps ( self ):
        """simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0)
    def test_full_loop_no_noise ( self ):
        """simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
    def test_full_loop_with_karras ( self ):
        """simple docstring"""
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_2_4_8) < 1e-3
    def test_full_loop_with_v_prediction ( self ):
        """simple docstring"""
        sample = self.full_loop(prediction_type='''v_prediction''')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1_4_5_3) < 1e-3
    def test_full_loop_with_karras_and_v_prediction ( self ):
        """simple docstring"""
        sample = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0_6_4_9) < 1e-3
    def test_fp16_support ( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample , t)
            sample = scheduler.step(residual , t , sample).prev_sample
        assert sample.dtype == torch.float16
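# Standalone denoising-loop sketch with DPMSolverSinglestepScheduler; a random
# tensor stands in for a real UNet's noise prediction (illustration only).
def _demo_scheduler_loop(num_inference_steps=10):
    scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
    sample = torch.randn(1 , 3 , 8 , 8)
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(model_output , t , sample).prev_sample
    return sample.shape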
| 7 | 0 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester :
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , scope=None , encoder_stride=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config ( self ):
        """simple docstring"""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model ( self , config , pixel_values , labels):
        """simple docstring"""
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling ( self , config , pixel_values , labels):
        """simple docstring"""
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification ( self , config , pixel_values , labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
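# Sequence-length arithmetic used by the tester above: a ViT splits the image
# into (image_size // patch_size) ** 2 patches and prepends one [CLS] token.
def _demo_seq_length(image_size=224 , patch_size=16):
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # 197 for ViT-B/16 at 224x224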
@require_torch
class __snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp ( self ):
        """simple docstring"""
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37)
    def test_config ( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_inputs_embeds ( self ):
        """simple docstring"""
        pass
    def test_model_common_attributes ( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
    def test_forward_signature ( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained ( self ):
        """simple docstring"""
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img ():
"""simple docstring"""
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
    def default_image_processor ( self ):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None
@slow
    def test_inference_image_classification_head ( self ):
        """simple docstring"""
        model = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@slow
    def test_inference_interpolate_pos_encoding ( self ):
        """simple docstring"""
        model = ViTModel.from_pretrained('''facebook/dino-vits8''').to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=480)
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''')
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape)
        expected_slice = torch.tensor(
            [[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16 ( self ):
        """simple docstring"""
        model = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.float16 , device_map='''auto''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''')
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 371 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
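# Usage sketch: with the _LazyModule indirection above, importing the package
# is cheap; the heavy modeling module only loads on first attribute access.
#
#   from transformers.models.deit import DeiTConfig, DeiTModel   # no torch import yet
#   model = DeiTModel(DeiTConfig())                               # triggers the real import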
| 7 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
snake_case_ : Union[str, Any] = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig ( PretrainedConfig ):
    model_type = '''efficientnet'''
    def __init__( self , num_channels = 3 , image_size = 600 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 112, 192] , out_channels = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.2_5 , hidden_act = "swish" , hidden_dim = 2560 , pooling_type = "mean" , initializer_range = 0.0_2 , batch_norm_eps = 0.0_0_1 , batch_norm_momentum = 0.9_9 , dropout_rate = 0.5 , drop_connect_rate = 0.2 , **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
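# Minimal usage sketch (illustrative, not part of the original module): the
# defaults above describe an EfficientNet-B7-sized model.
if __name__ == "__main__":
    config = EfficientNetConfig()
    print(config.image_size)  # 600
    print(config.num_hidden_layers)  # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64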
| 350 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
snake_case_ : Dict = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
snake_case_ : List[str] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
snake_case_ : List[Any] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__) < version.parse('''1.4.12'''):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''),
}) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 7 | 0 |
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs (their sign bits differ)."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
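    # Usage sketch (illustrative): the XOR of two ints is negative exactly when
    # their sign bits differ.
    print(different_signs(-7, 12))  # True
    print(different_signs(3, 9))  # False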
| 351 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 7 | 0 |
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        """Fundamental transformation: shift every channel value by `level`."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
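        # Illustrative extra step (hypothetical output path): negative levels darken.
        change_brightness(img, -100).save("image_data/lena_darker.png", format="png")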
| 352 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
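# Usage sketch (assumptions: the top-level diffusers package and the public
# "thu-ml/unidiffuser-v1" checkpoint are available; kept commented so importing
# this module stays side-effect free):
#
#     from diffusers import UniDiffuserPipeline
#     pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
#     sample = pipe(prompt="an astronaut riding a horse")
#     image = sample.images[0]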
| 7 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 353 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''')
    def test_small_model_tf(self):
"""simple docstring"""
pass
| 7 | 0 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Iterate through each branch of the state space tree with DFS and print
    every complete permutation."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
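# Illustrative cross-check (not part of the original script): the DFS above
# enumerates exactly len(sequence)! orderings, like itertools.permutations.
import itertools

assert sum(1 for _ in itertools.permutations([3, 1, 2, 4])) == 24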
| 354 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit.

    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the parity of each right-shifted value.

    >>> get_set_bits_count_using_modulo_operator(58)
    4
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark both implementations of counting set bits."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
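    # Sanity sketch (illustrative): both implementations agree with the result of
    # bin(x).count("1") on a few sample values.
    for x in (0, 1, 25, 37, 58, 2**20 - 1):
        assert get_set_bits_count_using_brian_kernighans_algorithm(x) == bin(x).count("1")
        assert get_set_bits_count_using_modulo_operator(x) == bin(x).count("1")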
| 7 | 0 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
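# Usage sketch (illustrative; mirrors the chained-URL convention exercised above,
# with a hypothetical local path):
#
#     fs, *_ = fsspec.get_fs_token_paths("gzip://dataset.jsonl::/tmp/dataset.jsonl.gz")
#     assert fs.isfile("dataset.jsonl")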
| 355 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty lists for both the story and the summary."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 7 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
snake_case_ : List[Any] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE , )
# Copy consistency with rename
self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , REFERENCE_CODE) , )
# Copy consistency with a really long name
        long_class_name = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , F"""{long_class_name}LMPredictionHead""" , re.sub('''Bert''' , lowerCamelCase__ , lowerCamelCase__) , )
# Copy consistency with overwrite
self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , REFERENCE_CODE , overwrite_result=re.sub('''Bert''' , '''TestModel''' , REFERENCE_CODE) , )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['''format_model_list'''])
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme['''format_model_list'''])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        localized_link = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link, localized_link, localized_readme['''format_model_list'''])
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 356 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''')
    def test_pretrained_model_lists(self):
pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
    def test_tokenizer_integration(self):
UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
@classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )
        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
@require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
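# End-to-end sketch (assumptions: network access plus the facebook/m2m100_418M
# checkpoint; kept commented because it is not part of the test suite above):
#
#     from transformers import M2M100ForConditionalGeneration
#     model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#     tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     batch = tokenizer("Hello world", return_tensors="pt")
#     generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
#     print(tokenizer.batch_decode(generated, skip_special_tokens=True))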
| 7 | 0 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
snake_case_ : Tuple = "▁"
snake_case_ : List[str] = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8",
                 unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
                         cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
                         sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
@property
    def vocab_size(self):
        return len(self.vocab)
    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def lowerCamelCase ( self : List[str] , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.convert_ids_to_tokens(lowercase__)
UpperCAmelCase_ = ''''''.join(lowercase__).replace(SPIECE_UNDERLINE , ''' ''').strip()
return out_string
def lowerCamelCase ( self : Tuple , _snake_case : List[str]):
"""simple docstring"""
return self.vocab.get(lowercase__ , self.vocab.get(self.unk_token))
def lowerCamelCase ( self : int , _snake_case : Optional[int]):
"""simple docstring"""
return self.reverse_vocab.get(lowercase__ , self.unk_token)
def lowerCamelCase ( self : str , _snake_case : List[Any] , _snake_case : Dict=None):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
UpperCAmelCase_ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def lowerCamelCase ( self : Optional[int] , _snake_case : Tuple , _snake_case : Optional[Any]=None):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Optional[Any]=None , _snake_case : Optional[int]=False):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''')
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowercase__)) + [1, 1] + ([0] * len(lowercase__)) + [1]
return [1] + ([0] * len(lowercase__)) + [1]
def lowerCamelCase ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(lowercase__) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(lowercase__) + 1) + [1] * (len(lowercase__) + 3)
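# e.g. for a 2-token first sequence and a 3-token second sequence this yields
# [0, 0, 0] + [1] * 6, nine ids covering [CLS] A [SEP] [SEP] B [SEP].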
def lowerCamelCase ( self : Any , _snake_case : int):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def lowerCamelCase ( self : List[str] , _snake_case : str):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Any):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def lowerCamelCase ( self : Dict , _snake_case : Tuple):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(lowercase__) == 1:
UpperCAmelCase_ = unicodedata.category(lowercase__)
if cat == "Zs":
return True
return False
def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = {}
with io.open(lowercase__ , '''r''' , encoding='''utf-8''') as f:
for index, line in enumerate(lowercase__):
UpperCAmelCase_ = line.rstrip('''\n''')
UpperCAmelCase_ = int(lowercase__)
return token_to_idx
def lowerCamelCase ( self : str , _snake_case : str , _snake_case : Optional[str] = None):
"""simple docstring"""
UpperCAmelCase_ = 0
if os.path.isdir(lowercase__):
UpperCAmelCase_ = os.path.join(
lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCAmelCase_ = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(lowercase__ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _snake_case: kv[1]):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''')
UpperCAmelCase_ = token_index
writer.write(token + '''\n''')
index += 1
UpperCAmelCase_ = os.path.join(lowercase__ , '''sentencepiece.bpe.model''')
with open(lowercase__ , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(lowercase__)
return (vocab_file,)
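# A minimal, self-contained sketch of the index-ordered vocabulary writing above
# (hypothetical helper name; the real method additionally serializes the
# SentencePiece model next to the vocab file):
def save_vocab_sketch(vocab: dict, path: str) -> None:
    index = 0
    with open(path, "w", encoding="utf-8") as writer:
        # write tokens ordered by their vocabulary index
        for token, token_index in sorted(vocab.items(), key=lambda kv: kv[1]):
            if index != token_index:
                print(f"Warning: non-consecutive index at {token!r} ({token_index})")
                index = token_index
            writer.write(token + "\n")
            index += 1

# save_vocab_sketch({"[PAD]": 0, "[CLS]": 1, "hello": 2}, "vocab.txt")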
| 357 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a )
class __snake_case ( a ):
def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
self.check_model_type(_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = {}, {}
if padding is not None:
UpperCAmelCase_ = padding
if truncation is not None:
UpperCAmelCase_ = truncation
if top_k is not None:
UpperCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str):
"""simple docstring"""
if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = {'''image''': image, '''question''': question}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case)
return results
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False):
"""simple docstring"""
UpperCAmelCase_ = load_image(inputs['''image'''])
UpperCAmelCase_ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case)
UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework)
model_inputs.update(_snake_case)
return model_inputs
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model(**_snake_case)
return model_outputs
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case)
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
UpperCAmelCase_ = scores.tolist()
UpperCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
| 7 | 0 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
def __init__( self : Dict , _snake_case : Tuple , _snake_case : Any=13 , _snake_case : Any=[30, 30] , _snake_case : Union[str, Any]=2 , _snake_case : Tuple=3 , _snake_case : Tuple=True , _snake_case : Union[str, Any]=True , _snake_case : int=32 , _snake_case : List[str]=5 , _snake_case : Union[str, Any]=4 , _snake_case : Any=37 , _snake_case : List[Any]="gelu" , _snake_case : List[str]=0.1 , _snake_case : int=0.1 , _snake_case : Optional[Any]=10 , _snake_case : Dict=0.0_2 , _snake_case : str=3 , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=8 , _snake_case : List[Any]=10 , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = n_targets
UpperCAmelCase_ = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
UpperCAmelCase_ = (image_size[1] // patch_size) * (image_size[0] // patch_size)
UpperCAmelCase_ = num_patches + 1 + self.num_detection_tokens
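# e.g. with the defaults above: (30 // 2) * (30 // 2) = 225 patches, so
# expected_seq_len = 225 + 1 ([CLS]) + 10 (detection tokens) = 236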
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
UpperCAmelCase_ = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
UpperCAmelCase_ = []
for i in range(self.batch_size):
UpperCAmelCase_ = {}
UpperCAmelCase_ = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_snake_case)
UpperCAmelCase_ = torch.rand(self.n_targets , 4 , device=_snake_case)
labels.append(_snake_case)
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self : Dict):
"""simple docstring"""
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = YolosModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def lowerCamelCase ( self : int , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = YolosForObjectDetection(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(pixel_values=_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
UpperCAmelCase_ = model(pixel_values=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
UpperCAmelCase__ : str = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Dict = False
def lowerCamelCase ( self : int , _snake_case : Any , _snake_case : List[str] , _snake_case : List[str]=False):
"""simple docstring"""
UpperCAmelCase_ = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
UpperCAmelCase_ = []
for i in range(self.model_tester.batch_size):
UpperCAmelCase_ = {}
UpperCAmelCase_ = torch.ones(
size=(self.model_tester.n_targets,) , device=_snake_case , dtype=torch.long)
UpperCAmelCase_ = torch.ones(
self.model_tester.n_targets , 4 , device=_snake_case , dtype=torch.float)
labels.append(_snake_case)
UpperCAmelCase_ = labels
return inputs_dict
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = YolosModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
pass
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear))
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
# in YOLOS, the seq_len is different
UpperCAmelCase_ = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.attentions
self.assertEqual(len(_snake_case) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.attentions
self.assertEqual(len(_snake_case) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
UpperCAmelCase_ = len(_snake_case)
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = 1
self.assertEqual(out_len + added_hidden_states , len(_snake_case))
UpperCAmelCase_ = outputs.attentions
self.assertEqual(len(_snake_case) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
def check_hidden_states_output(_snake_case : List[str] , _snake_case : Any , _snake_case : List[str]):
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_snake_case) , _snake_case)
# YOLOS has a different seq_length
UpperCAmelCase_ = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_snake_case)
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = YolosModel.from_pretrained(_snake_case)
self.assertIsNotNone(_snake_case)
def A () -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : Any):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''') if is_vision_available() else None
@slow
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''').to(_snake_case)
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_snake_case , return_tensors='''pt''').to(_snake_case)
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(inputs.pixel_values)
# verify outputs
UpperCAmelCase_ = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _snake_case)
UpperCAmelCase_ = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=_snake_case , )
UpperCAmelCase_ = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=_snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _snake_case , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _snake_case , atol=1e-4))
# verify postprocessing
UpperCAmelCase_ = image_processor.post_process_object_detection(
_snake_case , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
UpperCAmelCase_ = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1]).to(_snake_case)
UpperCAmelCase_ = [75, 75, 17, 63, 17]
UpperCAmelCase_ = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5]).to(_snake_case)
self.assertEqual(len(results['''scores''']) , 5)
self.assertTrue(torch.allclose(results['''scores'''] , _snake_case , atol=1e-4))
self.assertSequenceEqual(results['''labels'''].tolist() , _snake_case)
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , _snake_case))
| 358 |
import sys
def A (__A : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = len(__A )
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
for chain_length in range(2 , __A ):
for a in range(1 , n - chain_length + 1 ):
UpperCAmelCase_ = a + chain_length - 1
UpperCAmelCase_ = sys.maxsize
for c in range(__A , __A ):
UpperCAmelCase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase_ = cost
UpperCAmelCase_ = c
return matrix, sol
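# Recurrence implemented above (standard matrix-chain DP, cf. CLRS section 15.2):
#   matrix[a][b] = min over c in [a, b) of
#       matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# sol[a][b] records the split point c attaining the minimum.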
def A (__A : Any , __A : Dict , __A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if i == j:
print('''A''' + str(__A ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optimal_solution(__A , __A , optimal_solution[i][j] )
print_optimal_solution(__A , optimal_solution[i][j] + 1 , __A )
print(''')''' , end=''' ''' )
def A () -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = [30, 35, 15, 5, 10, 20, 25]
UpperCAmelCase_ = len(__A )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
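# For this classic CLRS example the minimum is 15125 scalar multiplications,
# achieved by the parenthesization ((A1(A2A3))((A4A5)A6)).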
UpperCAmelCase_ , UpperCAmelCase_ = matrix_chain_order(__A )
print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
print_optimal_solution(__A , 1 , n - 1 )
if __name__ == "__main__":
main()
| 7 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
snake_case_ : Any = None
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : Any = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
snake_case_ : Dict = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
snake_case_ : Union[str, Any] = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class __snake_case ( _lowercase ):
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Any = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ : Union[str, Any] = TaTokenizer
UpperCAmelCase__ : int = []
def __init__( self : Union[str, Any] , _snake_case : Union[str, Any]=None , _snake_case : List[str]=None , _snake_case : Optional[Any]="</s>" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="<pad>" , _snake_case : str=100 , _snake_case : int=None , **_snake_case : Optional[int] , ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ = [F"""<extra_id_{i}>""" for i in range(__UpperCamelCase)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ = len(set(filter(lambda _snake_case: bool('''extra_id_''' in str(__UpperCamelCase)) , __UpperCamelCase)))
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''')
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , extra_ids=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , **__UpperCamelCase , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = False if not self.vocab_file else True
UpperCAmelCase_ = extra_ids
@staticmethod
def lowerCamelCase ( _snake_case : str , _snake_case : List[Any] , _snake_case : str):
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __UpperCamelCase , )
return max_model_length
def lowerCamelCase ( self : int , _snake_case : str , _snake_case : Optional[str] = None):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(__UpperCamelCase):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
UpperCAmelCase_ = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(__UpperCamelCase):
copyfile(self.vocab_file , __UpperCamelCase)
logger.info(F"""Copy vocab file to {out_vocab_file}""")
return (out_vocab_file,)
def lowerCamelCase ( self : List[str] , _snake_case : List[int] , _snake_case : Optional[List[int]] = None):
"""simple docstring"""
UpperCAmelCase_ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
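# e.g. a single sequence becomes  X </s>  and a pair becomes  A </s> B </s>
# (T5 prepends nothing: the prefix token list defined above is empty).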
def lowerCamelCase ( self : List[Any] , _snake_case : List[int] , _snake_case : Optional[List[int]] = None):
"""simple docstring"""
UpperCAmelCase_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
return list(
set(filter(lambda _snake_case: bool(re.search(r'''<extra_id_\d+>''' , __UpperCamelCase)) is not None , self.additional_special_tokens)))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
return [self.convert_tokens_to_ids(__UpperCamelCase) for token in self.get_sentinel_tokens()]
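# Minimal usage sketch (assumes the public `transformers` API; "t5-small" matches
# the checkpoint names hard-coded in the maps above):
if __name__ == "__main__":
    from transformers import T5TokenizerFast

    tok = T5TokenizerFast.from_pretrained("t5-small")
    ids = tok("translate English to German: Hello").input_ids
    print(ids[-1] == tok.eos_token_id)  # True: </s> is appended, as built above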
| 359 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , )
is not None
):
UpperCAmelCase_ = True
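# e.g. this regex catches multi-line usages such as
#     getattr(
#         self.config, "image_size", ...
# which the single-line membership checks above would miss.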
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(__A )
UpperCAmelCase_ = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A )
and issubclass(__A , __A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(__A )
if len(__A ) > 0:
UpperCAmelCase_ = unused_attributes
if len(__A ) > 0:
UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
| 7 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
snake_case_ : Union[str, Any] = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
snake_case_ : Dict = logging.get_logger(__name__)
class __snake_case ( __lowercase ):
UpperCAmelCase__ : Any = '''maskformer'''
UpperCAmelCase__ : List[Any] = {'''hidden_size''': '''mask_feature_size'''}
UpperCAmelCase__ : List[Any] = ['''resnet''', '''swin''']
UpperCAmelCase__ : List[str] = ['''detr''']
def __init__( self : Optional[int] , _snake_case : int = 256 , _snake_case : int = 256 , _snake_case : float = 0.1 , _snake_case : bool = False , _snake_case : Optional[Dict] = None , _snake_case : Optional[Dict] = None , _snake_case : float = 0.0_2 , _snake_case : float = 1.0 , _snake_case : float = 1.0 , _snake_case : float = 1.0 , _snake_case : float = 2_0.0 , _snake_case : Optional[bool] = None , **_snake_case : Optional[Any] , ):
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCAmelCase_ = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
UpperCAmelCase_ = backbone_config.pop('''model_type''')
UpperCAmelCase_ = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ = config_class.from_dict(UpperCAmelCase__)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
F"""Supported model types: {",".join(self.backbones_supported)}""")
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCAmelCase_ = DetrConfig()
else:
# verify that the decoder is supported
UpperCAmelCase_ = (
decoder_config.pop('''model_type''') if isinstance(UpperCAmelCase__ , UpperCAmelCase__) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"""Transformer Decoder {decoder_type} not supported, please use one of"""
F""" {",".join(self.decoders_supported)}""")
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
UpperCAmelCase_ = CONFIG_MAPPING[decoder_type]
UpperCAmelCase_ = config_class.from_dict(UpperCAmelCase__)
UpperCAmelCase_ = backbone_config
UpperCAmelCase_ = decoder_config
# main feature dimension for the model
UpperCAmelCase_ = fpn_feature_size
UpperCAmelCase_ = mask_feature_size
# initializer
UpperCAmelCase_ = init_std
UpperCAmelCase_ = init_xavier_std
# Hungarian matcher && loss
UpperCAmelCase_ = cross_entropy_weight
UpperCAmelCase_ = dice_weight
UpperCAmelCase_ = mask_weight
UpperCAmelCase_ = use_auxiliary_loss
UpperCAmelCase_ = no_object_weight
UpperCAmelCase_ = output_auxiliary_logits
UpperCAmelCase_ = self.decoder_config.encoder_attention_heads
UpperCAmelCase_ = self.decoder_config.num_hidden_layers
super().__init__(**UpperCAmelCase__)
@classmethod
def lowerCamelCase ( cls : Tuple , _snake_case : PretrainedConfig , _snake_case : PretrainedConfig , **_snake_case : List[str]):
"""simple docstring"""
return cls(
backbone_config=UpperCAmelCase__ , decoder_config=UpperCAmelCase__ , **UpperCAmelCase__ , )
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = copy.deepcopy(self.__dict__)
UpperCAmelCase_ = self.backbone_config.to_dict()
UpperCAmelCase_ = self.decoder_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
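# Minimal usage sketch (assumes the public `transformers` exports mirrored by this
# file; the defaults fall back to Swin-Base + DETR exactly as in __init__ above):
if __name__ == "__main__":
    from transformers import DetrConfig, MaskFormerConfig, SwinConfig

    cfg = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())
    print(cfg.to_dict()["backbone_config"]["model_type"])  # -> "swin"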
| 360 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = FlaxAutoencoderKL
@property
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = 4
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = jax.random.PRNGKey(0)
UpperCAmelCase_ = jax.random.uniform(_snake_case , ((batch_size, num_channels) + sizes))
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
| 7 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : List[str] = logging.get_logger(__name__)
def A (__A : int , __A : Union[str, Any]=False ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCAmelCase_ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def A (__A : Optional[Any] , __A : Any , __A : Any=False ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase_ = """"""
else:
UpperCAmelCase_ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ = in_proj_bias[-config.hidden_size :]
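# Layout assumed above: timm packs q, k and v into a single
# (3 * hidden_size, hidden_size) matrix; rows [0:H) form the query projection,
# [H:2H) the key projection, and [2H:3H) the value projection.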
def A (__A : Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def A (__A : int , __A : int , __A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = dct.pop(__lowerCAmelCase )
UpperCAmelCase_ = val
def A () -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase_ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A (__A : Dict , __A : int , __A : Union[str, Any]=False ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=__lowerCAmelCase , )
UpperCAmelCase_ = ViTHybridConfig(backbone_config=__lowerCAmelCase , image_size=384 , num_labels=1000 )
UpperCAmelCase_ = False
# load original model from timm
UpperCAmelCase_ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ = timm_model.state_dict()
if base_model:
remove_classification_head_(__lowerCAmelCase )
UpperCAmelCase_ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
UpperCAmelCase_ = """huggingface/label-files"""
UpperCAmelCase_ = """imagenet-1k-id2label.json"""
UpperCAmelCase_ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
UpperCAmelCase_ = ViTHybridModel(__lowerCAmelCase ).eval()
else:
UpperCAmelCase_ = ViTHybridForImageClassification(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=__lowerCAmelCase ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
UpperCAmelCase_ = ViTHybridImageProcessor(
do_resize=__lowerCAmelCase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowerCAmelCase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=__lowerCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(__lowerCAmelCase ).unsqueeze(0 )
UpperCAmelCase_ = processor(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(__lowerCAmelCase )
UpperCAmelCase_ = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
UpperCAmelCase_ = timm_model.forward_features(__lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__lowerCAmelCase , outputs.pooler_output , atol=1E-3 )
else:
UpperCAmelCase_ = timm_model(__lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
print(F"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(F"""ybelkada/{vit_name}""" )
processor.push_to_hub(F"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
snake_case_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
snake_case_ : List[str] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 361 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case_ : List[str] = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
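# Every value above intentionally differs from the corresponding PretrainedConfig
# default, so the tests below can detect whether a kwarg was actually applied.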
@is_staging_test
class __snake_case ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = TOKEN
HfFolder.save_token(_snake_case)
@classmethod
def lowerCamelCase ( cls : List[str]):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''')
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}")
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}.")
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json")
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
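

# A minimal sketch (a simplification, an assumption about the real logic in
# transformers.configuration_utils) of the version-gated file selection the two
# tests above exercise: among config.X.Y.Z.json candidates, pick the newest one
# whose version does not exceed the installed library version.
def _pick_configuration_file_sketch(filenames, current_version):
    import re

    from packaging import version

    candidates = []
    for name in filenames:
        match = re.fullmatch(r"config\.(\d+\.\d+\.\d+)\.json", name)
        if match and version.parse(match.group(1)) <= version.parse(current_version):
            candidates.append((version.parse(match.group(1)), name))
    return max(candidates)[1] if candidates else "config.json"


# e.g. _pick_configuration_file_sketch(["config.4.0.0.json", "config.42.0.0.json"], "4.31.0")
# returns "config.4.0.0.json", matching the behavior asserted above.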
| 7 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32-style nested list of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
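

# Note on the shape asserted above: the AST extractor pads or truncates every
# clip to `max_length` frames (1024 by default) over 128 mel bins, so a single
# example comes out as (batch=1, time=1024, mel_bins=128).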
| 362 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # build in ascending order by pushing the largest values first
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
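    # A quick illustrative sanity check (not in the original file):
    assert str(merge_lists(SSL((2, 5)), SSL((1, 9)))) == "1 -> 2 -> 5 -> 9"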
| 7 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
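

# Illustrative standalone sketch (not part of the class above) of the dynamic-
# range compression applied in `_np_extract_fbank_features`: values are clipped
# to within 8 log10-units of the maximum, then shifted and scaled into ~[-1, 1].
if __name__ == "__main__":
    example = np.array([-12.0, -6.0, -2.0, 0.0])
    example = np.maximum(example, example.max() - 8.0)  # -> [-8., -6., -2., 0.]
    example = (example + 4.0) / 4.0                     # -> [-1., -0.5, 0.5, 1.]
    print(example)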
| 363 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer")
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer")
        return cls(question_encoder=question_encoder, generator=generator)
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
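

# Hypothetical usage sketch (the model id is an assumption about what exists on
# the Hub): the class above bundles two tokenizers and routes `__call__` to the
# currently selected one.
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer("who holds the record in 100m freestyle?", return_tensors="pt")
#   answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)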
| 7 | 0 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 364 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 7 | 0 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
snake_case_ : Union[str, Any] = 6378137.0
snake_case_ : List[Any] = 6356752.314245
snake_case_ : Dict = 6378137
def A (__A : Optional[int] , __A : Dict , __A : List[str] , __A : Any ) -> float:
"""simple docstring"""
UpperCAmelCase_ = (AXIS_A - AXIS_B) / AXIS_A
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE_ ) ) )
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE_ ) ) )
UpperCAmelCase_ = radians(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = radians(SCREAMING_SNAKE_CASE_ )
# Equation
UpperCAmelCase_ = sin((phi_a - phi_a) / 2 )
UpperCAmelCase_ = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
UpperCAmelCase_ = sqrt(sin_sq_phi + (cos(SCREAMING_SNAKE_CASE_ ) * cos(SCREAMING_SNAKE_CASE_ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
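    # Illustrative check (coordinates are assumptions): San Francisco to
    # Yosemite Valley should come out to roughly 254 km.
    print(haversine_distance(37.774856, -122.424227, 37.864742, -119.537521))  # ~254_000 m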
| 365 |
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Return -1 for an odd number of prime factors of `number`, 1 for an even number."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
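

# For reference, a minimal trial-division sketch of the `prime_factors` helper
# imported above (an assumption about maths.prime_factors, not its actual code):
def _prime_factors_sketch(n: int) -> list:
    factors, divisor = [], 2
    while divisor * divisor <= n:
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 1
    if n > 1:
        factors.append(n)
    return factors


# e.g. _prime_factors_sketch(12) == [2, 2, 3]: an odd count, so mobius(12) == -1.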
| 7 | 0 |
"""simple docstring"""
def A (__A : int = 3 , __A : int = 7 , __A : int = 1000000 ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
for current_denominator in range(1 , limit + 1 ):
UpperCAmelCase_ = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
UpperCAmelCase_ = current_numerator
UpperCAmelCase_ = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
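    # Worked example (hand-checked): with limit=8 the candidates just below 3/7
    # are 1/3, 2/5, 2/7 and 3/8; the largest is 2/5, so the numerator is 2.
    assert solution(3, 7, 8) == 2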
| 366 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
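

# Minimal usage sketch of the benchmark utilities exercised above (the settings
# are illustrative; the model id is the same tiny test model used in the tests):
#
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = PyTorchBenchmark(args).run()
#   print(results.time_inference_result)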
| 7 | 0 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=[1, 2, 3, 4, 5, 6, 7],
        scaling=True,
        num_time_features=0,
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        activation_function="gelu",
        dropout=0.1,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length=10,
        moving_average=25,
        autocorrelation_factor=3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
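

# Worked example of the derived encoder input width under the defaults above:
# cardinality falls back to [0], so sum(embedding_dimension) == 0, and with no
# time/static/dynamic features _number_of_features == input_size * 2 == 2; hence
# feature_size == input_size * len(lags_sequence) + 2 == 1 * 7 + 2 == 9.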
| 367 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Convert a PyTorch BertModel state dict into a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
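
# Example invocation (the script name and paths are placeholders):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoints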
| 7 | 0 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __snake_case ( a ):
@require_torch
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = '''\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '''
UpperCAmelCase_ = '''\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n '''
UpperCAmelCase_ = '''\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn\'t access internet\")\nsocket.socket = offline_socket\n '''
# Force fetching the files so that we can use the cache
UpperCAmelCase_ = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(lowercase_)
BertModel.from_pretrained(lowercase_)
BertTokenizer.from_pretrained(lowercase_)
pipeline(task='''fill-mask''' , model=lowercase_)
# baseline - just load from_pretrained with normal network
UpperCAmelCase_ = [sys.executable, '''-c''', '''\n'''.join([load, run, mock])]
# should succeed
UpperCAmelCase_ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase_ = '''1'''
UpperCAmelCase_ = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('''success''' , result.stdout.decode())
@require_torch
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '''
UpperCAmelCase_ = '''\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n '''
UpperCAmelCase_ = '''\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n '''
# Force fetching the files so that we can use the cache
UpperCAmelCase_ = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(lowercase_)
BertModel.from_pretrained(lowercase_)
BertTokenizer.from_pretrained(lowercase_)
pipeline(task='''fill-mask''' , model=lowercase_)
# baseline - just load from_pretrained with normal network
UpperCAmelCase_ = [sys.executable, '''-c''', '''\n'''.join([load, run, mock])]
# should succeed
UpperCAmelCase_ = self.get_env()
UpperCAmelCase_ = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('''success''' , result.stdout.decode())
@require_torch
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '''
UpperCAmelCase_ = '''\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n '''
UpperCAmelCase_ = '''\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n '''
# baseline - just load from_pretrained with normal network
UpperCAmelCase_ = [sys.executable, '''-c''', '''\n'''.join([load, run])]
# should succeed
UpperCAmelCase_ = self.get_env()
UpperCAmelCase_ = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('''success''' , result.stdout.decode())
# next emulate no network
UpperCAmelCase_ = [sys.executable, '''-c''', '''\n'''.join([load, mock, run])]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase_ = '''1'''
UpperCAmelCase_ = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('''success''' , result.stdout.decode())
@require_torch
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = '''\nfrom transformers import pipeline\n '''
UpperCAmelCase_ = '''\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n '''
UpperCAmelCase_ = '''\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n '''
UpperCAmelCase_ = self.get_env()
UpperCAmelCase_ = '''1'''
UpperCAmelCase_ = [sys.executable, '''-c''', '''\n'''.join([load, mock, run])]
UpperCAmelCase_ = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_)
self.assertEqual(result.returncode , 1 , result.stderr)
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''') , )
@require_torch
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''\nfrom transformers import AutoModel\n '''
UpperCAmelCase_ = '''\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n '''
# baseline - just load from_pretrained with normal network
UpperCAmelCase_ = [sys.executable, '''-c''', '''\n'''.join([load, run])]
# should succeed
UpperCAmelCase_ = self.get_env()
UpperCAmelCase_ = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('''success''' , result.stdout.decode())
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase_ = '''1'''
UpperCAmelCase_ = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('''success''' , result.stdout.decode())
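

# The offline trick used throughout these tests, in isolation (illustrative):
# monkey-patch `socket.socket` so that any attempted network connection fails
# fast instead of hanging.
#
#   import socket
#
#   def offline_socket(*args, **kwargs):
#       raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
#
#   socket.socket = offline_socket  # real network calls now raise immediately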
| 368 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict=3 , _snake_case : Dict=32 , _snake_case : List[str]=3 , _snake_case : Union[str, Any]=10 , _snake_case : Tuple=[10, 20, 30, 40] , _snake_case : Dict=[1, 1, 2, 1] , _snake_case : List[Any]=True , _snake_case : Dict=True , _snake_case : Union[str, Any]="relu" , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = self.get_config()
return config, pixel_values
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModel(config=_snake_case)
UpperCAmelCase_ = model(_snake_case)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = FlaxRegNetForImageClassification(config=_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : int = False
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case)
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
pass
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
def check_hidden_states_output(_snake_case : List[str] , _snake_case : Dict , _snake_case : List[str]):
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_stages
self.assertEqual(len(_snake_case) , expected_num_stages + 1)
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case)
UpperCAmelCase_ = model_class(_snake_case)
@jax.jit
def model_jitted(_snake_case : str , **_snake_case : Union[str, Any]):
return model(pixel_values=_snake_case , **_snake_case)
with self.subTest('''JIT Enabled'''):
UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple()
self.assertEqual(len(_snake_case) , len(_snake_case))
for jitted_output, output in zip(_snake_case , _snake_case):
self.assertEqual(jitted_output.shape , output.shape)
def A () -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : Dict):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None
@slow
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_snake_case , return_tensors='''np''')
UpperCAmelCase_ = model(**_snake_case)
# verify the logits
UpperCAmelCase_ = (1, 1000)
self.assertEqual(outputs.logits.shape , _snake_case)
UpperCAmelCase_ = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
| 7 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
snake_case_ : List[str] = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[str] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
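    # _LazyModule defers the actual submodule imports above until an attribute is first accessed.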
snake_case_ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
import comet # From: unbabel-comet
import torch
import datasets
snake_case_ : Tuple = datasets.logging.get_logger(__name__)
snake_case_ : str = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
snake_case_ : Tuple = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
snake_case_ : Optional[int] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase ( self : Any):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence'''),
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
                '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf''',
] , )
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
if self.config_name == "default":
UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da'''))
else:
UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name))
def lowerCamelCase ( self : List[Any] , _snake_case : str , _snake_case : List[str] , _snake_case : Tuple , _snake_case : int=None , _snake_case : Optional[Any]=False):
"""simple docstring"""
if gpus is None:
UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0
UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references}
UpperCAmelCase_ = [dict(zip(_snake_case , _snake_case)) for t in zip(*data.values())]
UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(_snake_case , gpus=_snake_case , progress_bar=_snake_case)
return {"mean_score": mean_score, "scores": scores}
| 7 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __snake_case ( a ):
UpperCAmelCase__ : Optional[int] = 4_2
class __snake_case ( nn.Module ):
def __init__( self : Union[str, Any] , _snake_case : str=3 , _snake_case : Dict=3 , _snake_case : Tuple=("DownEncoderBlock2D",) , _snake_case : Union[str, Any]=(64,) , _snake_case : Optional[Any]=2 , _snake_case : Tuple=32 , _snake_case : int="silu" , _snake_case : Dict=True , ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_ = layers_per_block
UpperCAmelCase_ = torch.nn.Convad(
_SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase_ = None
UpperCAmelCase_ = nn.ModuleList([])
# down
UpperCAmelCase_ = block_out_channels[0]
for i, down_block_type in enumerate(_SCREAMING_SNAKE_CASE):
UpperCAmelCase_ = output_channel
UpperCAmelCase_ = block_out_channels[i]
UpperCAmelCase_ = i == len(_SCREAMING_SNAKE_CASE) - 1
UpperCAmelCase_ = get_down_block(
_SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_SCREAMING_SNAKE_CASE , resnet_groups=_SCREAMING_SNAKE_CASE , attention_head_dim=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , )
self.down_blocks.append(_SCREAMING_SNAKE_CASE)
# mid
UpperCAmelCase_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , )
# out
UpperCAmelCase_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_SCREAMING_SNAKE_CASE , eps=1e-6)
UpperCAmelCase_ = nn.SiLU()
UpperCAmelCase_ = 2 * out_channels if double_z else out_channels
UpperCAmelCase_ = nn.Convad(block_out_channels[-1] , _SCREAMING_SNAKE_CASE , 3 , padding=1)
UpperCAmelCase_ = False
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = x
UpperCAmelCase_ = self.conv_in(_SCREAMING_SNAKE_CASE)
if self.training and self.gradient_checkpointing:
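            # Wrap each block in a closure so torch.utils.checkpoint can re-run its forward pass during backward, trading compute for activation memory.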
def create_custom_forward(_snake_case : Optional[Any]):
def custom_forward(*_snake_case : int):
return module(*_SCREAMING_SNAKE_CASE)
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0'''):
for down_block in self.down_blocks:
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE)
# middle
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE)
else:
for down_block in self.down_blocks:
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE)
# middle
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block) , _SCREAMING_SNAKE_CASE)
else:
# down
for down_block in self.down_blocks:
UpperCAmelCase_ = down_block(_SCREAMING_SNAKE_CASE)
# middle
UpperCAmelCase_ = self.mid_block(_SCREAMING_SNAKE_CASE)
# post-process
UpperCAmelCase_ = self.conv_norm_out(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = self.conv_act(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = self.conv_out(_SCREAMING_SNAKE_CASE)
return sample
class __snake_case ( nn.Module ):
def __init__( self : Optional[int] , _snake_case : Optional[int]=3 , _snake_case : Optional[Any]=3 , _snake_case : List[Any]=("UpDecoderBlock2D",) , _snake_case : str=(64,) , _snake_case : Optional[Any]=2 , _snake_case : List[str]=32 , _snake_case : Union[str, Any]="silu" , _snake_case : str="group" , ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_ = layers_per_block
UpperCAmelCase_ = nn.Convad(
_SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase_ = None
UpperCAmelCase_ = nn.ModuleList([])
UpperCAmelCase_ = in_channels if norm_type == '''spatial''' else None
# mid
UpperCAmelCase_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , )
# up
UpperCAmelCase_ = list(reversed(_SCREAMING_SNAKE_CASE))
UpperCAmelCase_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_SCREAMING_SNAKE_CASE):
UpperCAmelCase_ = output_channel
UpperCAmelCase_ = reversed_block_out_channels[i]
UpperCAmelCase_ = i == len(_SCREAMING_SNAKE_CASE) - 1
UpperCAmelCase_ = get_up_block(
_SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , prev_output_channel=_SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_SCREAMING_SNAKE_CASE , resnet_groups=_SCREAMING_SNAKE_CASE , attention_head_dim=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , resnet_time_scale_shift=_SCREAMING_SNAKE_CASE , )
self.up_blocks.append(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = output_channel
# out
if norm_type == "spatial":
UpperCAmelCase_ = SpatialNorm(block_out_channels[0] , _SCREAMING_SNAKE_CASE)
else:
UpperCAmelCase_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_SCREAMING_SNAKE_CASE , eps=1e-6)
UpperCAmelCase_ = nn.SiLU()
UpperCAmelCase_ = nn.Convad(block_out_channels[0] , _SCREAMING_SNAKE_CASE , 3 , padding=1)
UpperCAmelCase_ = False
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Tuple , _snake_case : int=None):
"""simple docstring"""
UpperCAmelCase_ = z
UpperCAmelCase_ = self.conv_in(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = next(iter(self.up_blocks.parameters())).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_snake_case : Union[str, Any]):
def custom_forward(*_snake_case : Dict):
return module(*_SCREAMING_SNAKE_CASE)
return custom_forward
if is_torch_version('''>=''' , '''1.11.0'''):
# middle
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = sample.to(_SCREAMING_SNAKE_CASE)
# up
for up_block in self.up_blocks:
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE)
else:
# middle
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = sample.to(_SCREAMING_SNAKE_CASE)
# up
for up_block in self.up_blocks:
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
else:
# middle
UpperCAmelCase_ = self.mid_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = sample.to(_SCREAMING_SNAKE_CASE)
# up
for up_block in self.up_blocks:
UpperCAmelCase_ = up_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
# post-process
if latent_embeds is None:
UpperCAmelCase_ = self.conv_norm_out(_SCREAMING_SNAKE_CASE)
else:
UpperCAmelCase_ = self.conv_norm_out(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = self.conv_act(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = self.conv_out(_SCREAMING_SNAKE_CASE)
return sample
class __snake_case ( nn.Module ):
def __init__( self : List[str] , _snake_case : int , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : List[str]=None , _snake_case : Any="random" , _snake_case : Tuple=False , _snake_case : Any=True):
"""simple docstring"""
super().__init__()
UpperCAmelCase_ = n_e
UpperCAmelCase_ = vq_embed_dim
UpperCAmelCase_ = beta
UpperCAmelCase_ = legacy
UpperCAmelCase_ = nn.Embedding(self.n_e , self.vq_embed_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e)
UpperCAmelCase_ = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap)))
UpperCAmelCase_ = self.used.shape[0]
UpperCAmelCase_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCAmelCase_ = self.re_embed
UpperCAmelCase_ = self.re_embed + 1
print(
F"""Remapping {self.n_e} indices to {self.re_embed} indices. """
F"""Using {self.unknown_index} for unknown indices.""")
else:
UpperCAmelCase_ = n_e
UpperCAmelCase_ = sane_index_shape
def lowerCamelCase ( self : Tuple , _snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = inds.shape
assert len(_SCREAMING_SNAKE_CASE) > 1
UpperCAmelCase_ = inds.reshape(ishape[0] , -1)
UpperCAmelCase_ = self.used.to(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = (inds[:, :, None] == used[None, None, ...]).long()
UpperCAmelCase_ = match.argmax(-1)
UpperCAmelCase_ = match.sum(2) < 1
if self.unknown_index == "random":
UpperCAmelCase_ = torch.randint(0 , self.re_embed , size=new[unknown].shape).to(device=new.device)
else:
UpperCAmelCase_ = self.unknown_index
return new.reshape(_SCREAMING_SNAKE_CASE)
def lowerCamelCase ( self : Dict , _snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = inds.shape
assert len(_SCREAMING_SNAKE_CASE) > 1
UpperCAmelCase_ = inds.reshape(ishape[0] , -1)
UpperCAmelCase_ = self.used.to(_SCREAMING_SNAKE_CASE)
if self.re_embed > self.used.shape[0]: # extra token
UpperCAmelCase_ = 0 # simply set to zero
UpperCAmelCase_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _SCREAMING_SNAKE_CASE)
return back.reshape(_SCREAMING_SNAKE_CASE)
def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = z.permute(0 , 2 , 3 , 1).contiguous()
UpperCAmelCase_ = z.view(-1 , self.vq_embed_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCAmelCase_ = torch.argmin(torch.cdist(_SCREAMING_SNAKE_CASE , self.embedding.weight) , dim=1)
UpperCAmelCase_ = self.embedding(_SCREAMING_SNAKE_CASE).view(z.shape)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
# compute loss for embedding
if not self.legacy:
UpperCAmelCase_ = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
else:
UpperCAmelCase_ = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
UpperCAmelCase_ = z + (z_q - z).detach()
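        # Straight-through estimator: the forward value is z_q, but gradients flow back to z unchanged.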
# reshape back to match original input shape
UpperCAmelCase_ = z_q.permute(0 , 3 , 1 , 2).contiguous()
if self.remap is not None:
UpperCAmelCase_ = min_encoding_indices.reshape(z.shape[0] , -1) # add batch axis
UpperCAmelCase_ = self.remap_to_used(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = min_encoding_indices.reshape(-1 , 1) # flatten
if self.sane_index_shape:
UpperCAmelCase_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase ( self : Tuple , _snake_case : List[Any] , _snake_case : Tuple):
"""simple docstring"""
if self.remap is not None:
UpperCAmelCase_ = indices.reshape(shape[0] , -1) # add batch axis
UpperCAmelCase_ = self.unmap_to_all(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = indices.reshape(-1) # flatten again
# get quantized latent vectors
UpperCAmelCase_ = self.embedding(_SCREAMING_SNAKE_CASE)
if shape is not None:
UpperCAmelCase_ = z_q.view(_SCREAMING_SNAKE_CASE)
# reshape back to match original input shape
UpperCAmelCase_ = z_q.permute(0 , 3 , 1 , 2).contiguous()
return z_q
class __snake_case ( a ):
def __init__( self : str , _snake_case : Optional[Any] , _snake_case : Optional[Any]=False):
"""simple docstring"""
UpperCAmelCase_ = parameters
UpperCAmelCase_ = torch.chunk(_SCREAMING_SNAKE_CASE , 2 , dim=1)
UpperCAmelCase_ = torch.clamp(self.logvar , -30.0 , 20.0)
UpperCAmelCase_ = deterministic
UpperCAmelCase_ = torch.exp(0.5 * self.logvar)
UpperCAmelCase_ = torch.exp(self.logvar)
if self.deterministic:
UpperCAmelCase_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype)
def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any] = None):
"""simple docstring"""
UpperCAmelCase_ = randn_tensor(
self.mean.shape , generator=_SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype)
UpperCAmelCase_ = self.mean + self.std * sample
return x
def lowerCamelCase ( self : List[Any] , _snake_case : int=None):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2) + self.var - 1.0 - self.logvar , dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase ( self : int , _snake_case : str , _snake_case : str=[1, 2, 3]):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0])
UpperCAmelCase_ = np.log(2.0 * np.pi)
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2) / self.var , dim=_SCREAMING_SNAKE_CASE)
def lowerCamelCase ( self : int):
"""simple docstring"""
return self.mean
| 370 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __snake_case ( a ):
UpperCAmelCase__ : Optional[int] = (DPMSolverSinglestepScheduler,)
UpperCAmelCase__ : str = (('''num_inference_steps''', 2_5),)
def lowerCamelCase ( self : Dict , **_snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_snake_case)
return config
def lowerCamelCase ( self : Dict , _snake_case : int=0 , **_snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case)
UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case)
new_scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_snake_case , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any]=0 , **_snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case)
UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case)
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Dict , _snake_case : int=None , **_snake_case : Optional[Any]):
"""simple docstring"""
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
return sample
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_5_7_4) < 1e-3
def lowerCamelCase ( self : int):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(thresholding=_snake_case)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , algorithm_type='''dpmsolver++''' , solver_order=_snake_case , solver_type=_snake_case , )
def lowerCamelCase ( self : Dict):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
UpperCAmelCase_ = self.full_loop(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
assert not torch.isnan(_snake_case).any(), "Samples have nan numbers"
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(lower_order_final=_snake_case)
self.check_over_configs(lower_order_final=_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def lowerCamelCase ( self : int):
"""simple docstring"""
self.check_over_configs(variance_type=_snake_case)
self.check_over_configs(variance_type='''learned_range''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_snake_case , time_step=0)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_2_4_8) < 1e-3
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.1_4_5_3) < 1e-3
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.0_6_4_9) < 1e-3
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_snake_case , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_snake_case)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
assert sample.dtype == torch.floataa
| 7 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : str = "▁"
snake_case_ : int = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
snake_case_ : Union[str, Any] = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
snake_case_ : Optional[int] = {
"facebook/s2t-small-librispeech-asr": 1024,
}
snake_case_ : Any = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
snake_case_ : str = {"mustc": MUSTC_LANGS}
class __snake_case ( a_ ):
UpperCAmelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[str] = MAX_MODEL_INPUT_SIZES
UpperCAmelCase__ : Dict = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
def __init__( self : Any , _snake_case : Optional[int] , _snake_case : str , _snake_case : Dict="<s>" , _snake_case : Optional[Any]="</s>" , _snake_case : List[Any]="<pad>" , _snake_case : int="<unk>" , _snake_case : Tuple=False , _snake_case : Tuple=False , _snake_case : Any=None , _snake_case : int=None , _snake_case : Dict = None , **_snake_case : Dict , ):
"""simple docstring"""
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , do_upper_case=_snake_case , do_lower_case=_snake_case , tgt_lang=_snake_case , lang_codes=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
UpperCAmelCase_ = do_upper_case
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = load_json(_snake_case)
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ = spm_file
UpperCAmelCase_ = load_spm(_snake_case , self.sp_model_kwargs)
if lang_codes is not None:
UpperCAmelCase_ = lang_codes
UpperCAmelCase_ = LANGUAGES[lang_codes]
UpperCAmelCase_ = [F"""<lang:{lang}>""" for lang in self.langs]
UpperCAmelCase_ = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""") for lang in self.langs}
UpperCAmelCase_ = self.lang_tokens
UpperCAmelCase_ = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang)
else:
UpperCAmelCase_ = {}
@property
def lowerCamelCase ( self : int):
"""simple docstring"""
return len(self.encoder)
@property
def lowerCamelCase ( self : str):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = new_tgt_lang
self.set_tgt_lang_special_tokens(_snake_case)
def lowerCamelCase ( self : int , _snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = self.lang_code_to_id[tgt_lang]
UpperCAmelCase_ = [lang_code_id]
def lowerCamelCase ( self : List[str] , _snake_case : Dict):
"""simple docstring"""
return self.sp_model.encode(_snake_case , out_type=_snake_case)
def lowerCamelCase ( self : Dict , _snake_case : int):
"""simple docstring"""
return self.encoder.get(_snake_case , self.encoder[self.unk_token])
def lowerCamelCase ( self : Tuple , _snake_case : Dict):
"""simple docstring"""
return self.decoder.get(_snake_case , self.unk_token)
def lowerCamelCase ( self : Tuple , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
UpperCAmelCase_ = self.sp_model.decode(_snake_case)
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
UpperCAmelCase_ = []
else:
current_sub_tokens.append(_snake_case)
UpperCAmelCase_ = self.sp_model.decode(_snake_case)
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowerCamelCase ( self : Tuple , _snake_case : Optional[int] , _snake_case : Union[str, Any]=None):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase ( self : Dict , _snake_case : List[str] , _snake_case : Union[str, Any] = None , _snake_case : Any = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case)
UpperCAmelCase_ = [1] * len(self.prefix_tokens)
UpperCAmelCase_ = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_snake_case)) + suffix_ones
return prefix_ones + ([0] * len(_snake_case)) + ([0] * len(_snake_case)) + suffix_ones
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.encoder.copy()
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self : Optional[Any] , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = load_spm(self.spm_file , self.sp_model_kwargs)
def lowerCamelCase ( self : str , _snake_case : str , _snake_case : int = None):
"""simple docstring"""
UpperCAmelCase_ = Path(_snake_case)
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
UpperCAmelCase_ = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
UpperCAmelCase_ = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , _snake_case)
if os.path.abspath(self.spm_file) != os.path.abspath(_snake_case) and os.path.isfile(self.spm_file):
copyfile(self.spm_file , _snake_case)
elif not os.path.isfile(self.spm_file):
with open(_snake_case , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_snake_case)
return (str(_snake_case), str(_snake_case))
def A (__A : str , __A : Dict[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ )
spm.Load(str(lowerCamelCase__ ) )
return spm
def A (__A : str ) -> Dict:
"""simple docstring"""
with open(lowerCamelCase__ , '''r''' ) as f:
return json.load(lowerCamelCase__ )
def A (__A : int , __A : str ) -> Dict:
"""simple docstring"""
with open(lowerCamelCase__ , '''w''' ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
| 371 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["DeiTFeatureExtractor"]
snake_case_ : List[str] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = "ylacombe/bark-small"
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = "en_speaker_1"
UpperCAmelCase_ = "This is a test string"
UpperCAmelCase_ = "speaker_embeddings_path.json"
UpperCAmelCase_ = "speaker_embeddings"
def lowerCamelCase ( self : Optional[Any] , **_snake_case : Optional[Any]):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = BarkProcessor(tokenizer=_snake_case)
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCAmelCase_ = 35
UpperCAmelCase_ = 2
UpperCAmelCase_ = 8
UpperCAmelCase_ = {
"semantic_prompt": np.ones(_snake_case),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
"fine_prompt": np.ones((nb_codebooks_total, seq_len)),
}
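        # A Bark voice preset bundles the semantic, coarse and fine codebook prompts for one speaker.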
# test providing already loaded voice_preset
UpperCAmelCase_ = processor(text=self.input_string , voice_preset=_snake_case)
UpperCAmelCase_ = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_snake_case , np.array([])).tolist())
# test loading voice preset from npz file
UpperCAmelCase_ = os.path.join(self.tmpdirname , '''file.npz''')
np.savez(_snake_case , **_snake_case)
UpperCAmelCase_ = processor(text=self.input_string , voice_preset=_snake_case)
UpperCAmelCase_ = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_snake_case , np.array([])).tolist())
# test loading voice preset from the hub
UpperCAmelCase_ = processor(text=self.input_string , voice_preset=self.voice_preset)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = BarkProcessor(tokenizer=_snake_case)
UpperCAmelCase_ = processor(text=self.input_string)
UpperCAmelCase_ = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
| 350 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
snake_case_ : Dict = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
snake_case_ : List[str] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
snake_case_ : List[Any] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
if version.parse(scb.__version__) < version.parse('''1.4.12'''):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''),
}) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , ):
"""simple docstring"""
UpperCAmelCase_ = len(references[0])
if any(len(_snake_case) != references_per_prediction for refs in references):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
UpperCAmelCase_ = [[refs[i] for refs in references] for i in range(_snake_case)]
UpperCAmelCase_ = TER(
normalized=_snake_case , no_punct=_snake_case , asian_support=_snake_case , case_sensitive=_snake_case , )
UpperCAmelCase_ = sb_ter.corpus_score(_snake_case , _snake_case)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 7 | 0 |
import os
from pathlib import Path
def A (__A : List[str] , __A : Union[str, Any] , __A : Any ) -> int:
"""simple docstring"""
UpperCAmelCase_ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
UpperCAmelCase_ = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
UpperCAmelCase_ = F"""{src_lang}-{tgt_lang}"""
UpperCAmelCase_ = F"""\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"""
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
UpperCAmelCase_ = os.path.join(UpperCamelCase__ , '''README.md''' )
print(F"""Generating {path}""" )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
snake_case_ : Union[str, Any] = Path(__file__).resolve().parent.parent.parent
snake_case_ : Tuple = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
snake_case_ : List[str] = model_name.split("-")
snake_case_ : Optional[int] = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
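# Illustrative result (paths follow the constants above): running this script
# from the repo root writes one generated card per translation direction, e.g.
#     model_cards/facebook/wmt19-ru-en/README.md
#     model_cards/facebook/wmt19-en-de/README.md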
| 351 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __snake_case ( unittest.TestCase , a ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = load_tool('''text-to-speech''')
self.tool.setup()
def lowerCamelCase ( self : int):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
def lowerCamelCase ( self : Any):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
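# Both checks above seed torch with 0 before calling the tool, so the first
# three samples of the generated waveform are expected to match the reference
# tensor within torch.allclose tolerance on every run.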
| 7 | 0 |
def A (__A : bytes ) -> Optional[Any]:
"""simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(UpperCAmelCase_ )] )
def A (__A : str ) -> Optional[int]:
"""simple docstring"""
if (len(UpperCAmelCase_ ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCAmelCase_ ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCAmelCase_ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
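# Worked example for the decoder above: "48656C6C6F" splits into the byte
# pairs 48 65 6C 6C 6F, i.e. 72 101 108 108 111 in decimal, which is b"Hello".
# An odd-length or lowercase input fails one of the two ValueError checks.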
| 352 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 7 | 0 |
"""simple docstring"""
import math
def A (__A : int ) -> bool:
"""simple docstring"""
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
number >= 0
), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCAmelCase_ = range(3 , int(math.sqrt(_lowerCAmelCase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def A (__A : Any , __A : List[str]=1 , **__A : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ = factor * value
UpperCAmelCase_ = value
while not is_prime(_lowerCAmelCase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **_lowerCAmelCase )
return value
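# Worked example for the trial-division test above: for number = 25 the odd
# candidates are range(3, 6, 2) = (3, 5) and 25 % 5 == 0, so the result is
# False; for number = 29 no odd candidate up to sqrt(29) divides it, so the
# result is True. next_prime then steps value upward (or downward when
# desc=True is passed) until that test succeeds.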
| 353 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __snake_case :
@staticmethod
def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1)
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
] , )
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
@slow
@require_torch
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''')
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
| 7 | 0 |
from __future__ import annotations
from math import pi
def __A (__A : float , __A : float , __A : float ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
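# Worked example (values are illustrative): with inductance = 0.5 H and
# frequency = 60 Hz, the returned reactance is 2 * pi * 60 * 0.5, roughly
# 188.5 ohm; zeroing a different argument instead solves the same relation
# X_L = 2 * pi * f * L for the missing quantity.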
| 354 |
from timeit import timeit
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
number &= number - 1
result += 1
return result
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def A () -> None:
"""simple docstring"""
def do_benchmark(__A : int ) -> None:
UpperCAmelCase_ = '''import __main__ as z'''
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__A ) = }""" )
UpperCAmelCase_ = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=__A )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__A ) = }""" )
UpperCAmelCase_ = timeit(
'''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=__A , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
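# Worked example for both counters above: 25 is 0b11001, so the answer is 3.
# Brian Kernighan's loop clears the lowest set bit on each pass
# (25 & 24 = 24, 24 & 23 = 16, 16 & 15 = 0) and therefore finishes in exactly
# three iterations instead of scanning every bit position.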
| 7 | 0 |
def A (__A : Optional[Any] ) -> list:
"""simple docstring"""
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__UpperCAmelCase ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
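# Worked example: for the input "abc" the comprehension above yields
# ["Abc", "aBc", "abC"], one copy of the string per alphabetic position with
# just that position upper-cased; non-alphabetic positions are skipped.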
| 355 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = 10
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_snake_case , _snake_case)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy())
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy())
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case)
np.testing.assert_array_equal(_snake_case , _snake_case)
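# Reading off the expected tensors in the last test above: the token type id
# starts at 1 and flips (1 -> 0 -> 1 -> ...) at every occurrence of the
# separator id 101, with the separator itself taking the new segment value.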
| 7 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def A (__A : List[Any] ) -> Tuple:
"""simple docstring"""
if "cls_token" in name:
UpperCAmelCase_ = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' )
if "mask_token" in name:
UpperCAmelCase_ = name.replace('''mask_token''' , '''decoder.mask_token''' )
if "decoder_pos_embed" in name:
UpperCAmelCase_ = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
UpperCAmelCase_ = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCAmelCase_ = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCAmelCase_ = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' )
if "decoder_blocks" in name:
UpperCAmelCase_ = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
UpperCAmelCase_ = name.replace('''blocks''' , '''vit.encoder.layer''' )
if "attn.proj" in name:
UpperCAmelCase_ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCAmelCase_ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase_ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase_ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase_ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
UpperCAmelCase_ = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
UpperCAmelCase_ = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
UpperCAmelCase_ = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
UpperCAmelCase_ = name.replace('''norm.weight''' , '''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
UpperCAmelCase_ = name.replace('''norm.bias''' , '''vit.layernorm.bias''' )
return name
def A (__A : List[Any] , __A : Any ) -> int:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
UpperCAmelCase_ = key.split('''.''' )
UpperCAmelCase_ = int(key_split[1] )
if "decoder_blocks" in key:
UpperCAmelCase_ = config.decoder_hidden_size
UpperCAmelCase_ = '''decoder.decoder_layers.'''
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
elif "bias" in key:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
else:
UpperCAmelCase_ = config.hidden_size
UpperCAmelCase_ = '''vit.encoder.layer.'''
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
elif "bias" in key:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
else:
UpperCAmelCase_ = val
return orig_state_dict
def A (__A : List[Any] , __A : Any ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCAmelCase_ = 1024
UpperCAmelCase_ = 4096
UpperCAmelCase_ = 24
UpperCAmelCase_ = 16
elif "huge" in checkpoint_url:
UpperCAmelCase_ = 14
UpperCAmelCase_ = 1280
UpperCAmelCase_ = 5120
UpperCAmelCase_ = 32
UpperCAmelCase_ = 16
UpperCAmelCase_ = ViTMAEForPreTraining(lowercase_ )
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(lowercase_ , map_location='''cpu''' )['''model''']
UpperCAmelCase_ = ViTMAEImageProcessor(size=config.image_size )
UpperCAmelCase_ = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
UpperCAmelCase_ = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
UpperCAmelCase_ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
UpperCAmelCase_ = ViTMAEImageProcessor(size=config.image_size )
UpperCAmelCase_ = image_processor(images=lowercase_ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
UpperCAmelCase_ = model(**lowercase_ )
UpperCAmelCase_ = outputs.logits
if "large" in checkpoint_url:
UpperCAmelCase_ = torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
UpperCAmelCase_ = torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
UpperCAmelCase_ = torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase_ , atol=1E-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
snake_case_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
snake_case_ : Optional[int] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
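# Hedged CLI sketch (the checkpoint URL is this script's default; the script
# filename and output directory are illustrative):
#     python convert_vit_mae_checkpoint.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#         --pytorch_dump_folder_path ./vit-mae-base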
| 356 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
snake_case_ : Optional[Any] = 128022
snake_case_ : Optional[int] = 128028
@require_sentencepiece
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : List[str] = MaMaaaTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = True
def lowerCamelCase ( self : str):
"""simple docstring"""
super().setUp()
UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case))))
UpperCAmelCase_ = Path(self.tmpdirname)
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file'''])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file'''])
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = '''</s>'''
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''</s>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''<s>''')
self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case)
self.assertEqual(_snake_case , '''This is a test''')
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : Dict = '''facebook/m2m100_418M'''
UpperCAmelCase__ : Dict = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
UpperCAmelCase__ : Dict = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''')
UpperCAmelCase_ = 1
return cls
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.get_vocab()
self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size)
self.assertEqual(vocab['''<unk>'''] , 3)
self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
self.assertIn(_snake_case , self.tokenizer.all_special_ids)
# fmt: off
UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case)
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case)
self.assertEqual(_snake_case , _snake_case)
self.assertNotIn(self.tokenizer.eos_token , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case)
self.assertDictEqual(new_tok.lang_token_to_id , _snake_case)
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = '''fr'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''')
UpperCAmelCase_ = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
for k in batch:
UpperCAmelCase_ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
UpperCAmelCase_ = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
UpperCAmelCase_ = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''')
self.assertEqual(
nested_simplify(_snake_case) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
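# Hedged end-to-end sketch mirroring the seq2seq test above (names follow the
# public M2M100 checkpoint; the decode step is illustrative):
#     tok = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     batch = tok(src_text, text_target=tgt_text, return_tensors="pt")
#     # each encoded sequence starts with its language code and ends with
#     # eos (id 2), as asserted above; decode with skip_special_tokens=True.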
| 7 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __snake_case :
def __init__( self : Union[str, Any] , _snake_case : Dict , _snake_case : Any=13 , _snake_case : Optional[int]=30 , _snake_case : Optional[int]=2 , _snake_case : Any=3 , _snake_case : List[str]=True , _snake_case : List[Any]=True , _snake_case : Tuple=32 , _snake_case : Any=5 , _snake_case : int=4 , _snake_case : Union[str, Any]=37 , _snake_case : Tuple="gelu" , _snake_case : List[str]=0.1 , _snake_case : Dict=0.1 , _snake_case : str=10 , _snake_case : Tuple=0.0_2 , _snake_case : Dict=3 , _snake_case : Any=None , _snake_case : Optional[Any]=2 , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 2
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCamelCase ( self : int , _snake_case : Any , _snake_case : int , _snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = DeiTModel(config=lowercase_)
model.to(lowercase_)
model.eval()
UpperCAmelCase_ = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase ( self : Tuple , _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = DeiTForMaskedImageModeling(config=lowercase_)
model.to(lowercase_)
model.eval()
UpperCAmelCase_ = model(lowercase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = DeiTForMaskedImageModeling(lowercase_)
model.to(lowercase_)
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCAmelCase_ = model(lowercase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def lowerCamelCase ( self : Optional[int] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = DeiTForImageClassification(lowercase_)
model.to(lowercase_)
model.eval()
UpperCAmelCase_ = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = DeiTForImageClassification(lowercase_)
model.to(lowercase_)
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCAmelCase_ = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
UpperCAmelCase_
) = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ : str = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : List[Any] = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = DeiTModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''')
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
pass
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowercase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear))
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowercase_)
UpperCAmelCase_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : Dict=False):
"""simple docstring"""
UpperCAmelCase_ = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCamelCase ( self : Any):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher only supports inference
if (
model_class in get_values(lowercase_)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase_ = model_class(lowercase_)
model.to(lowercase_)
model.train()
UpperCAmelCase_ = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
UpperCAmelCase_ = model(**lowercase_).loss
loss.backward()
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ = False
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase_) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher only supports inference
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase_ = model_class(lowercase_)
model.gradient_checkpointing_enable()
model.to(lowercase_)
model.train()
UpperCAmelCase_ = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
UpperCAmelCase_ = model(**lowercase_).loss
loss.backward()
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase_),
*get_values(lowercase_),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}"""):
UpperCAmelCase_ = problem_type['''title''']
UpperCAmelCase_ = problem_type['''num_labels''']
UpperCAmelCase_ = model_class(lowercase_)
model.to(lowercase_)
model.train()
UpperCAmelCase_ = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if problem_type["num_labels"] > 1:
UpperCAmelCase_ = inputs['''labels'''].unsqueeze(1).repeat(1 , problem_type['''num_labels'''])
UpperCAmelCase_ = inputs['''labels'''].to(problem_type['''dtype'''])
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase_) as warning_list:
UpperCAmelCase_ = model(**lowercase_).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""")
loss.backward()
@slow
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = DeiTModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def A () -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : Any):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''')
if is_vision_available()
else None
)
@slow
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''').to(
lowercase_)
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowercase_ , return_tensors='''pt''').to(lowercase_)
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**lowercase_)
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase_)
UpperCAmelCase_ = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''')
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowercase_ , return_tensors='''pt''')
UpperCAmelCase_ = inputs.pixel_values.to(lowercase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase_ = model(lowercase_)
| 357 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a )
class __snake_case ( a ):
def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
self.check_model_type(_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = {}, {}
if padding is not None:
UpperCAmelCase_ = padding
if truncation is not None:
UpperCAmelCase_ = truncation
if top_k is not None:
UpperCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str):
"""simple docstring"""
if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = {'''image''': image, '''question''': question}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case)
return results
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False):
"""simple docstring"""
UpperCAmelCase_ = load_image(inputs['''image'''])
UpperCAmelCase_ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case)
UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework)
model_inputs.update(_snake_case)
return model_inputs
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model(**_snake_case)
return model_outputs
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case)
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
UpperCAmelCase_ = scores.tolist()
UpperCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
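# Hedged usage sketch for this pipeline (model name and image path are
# illustrative; the output shape mirrors the VQA pipeline tests earlier in
# this document):
#     vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#     vqa(image="cats.png", question="How many cats are there?", top_k=2)
#     # -> [{"score": ..., "answer": "2"}, {"score": ..., "answer": "1"}]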
| 7 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __snake_case ( a ):
UpperCAmelCase__ : Union[str, Any] = 4_2
UpperCAmelCase__ : Optional[int] = 4_2
UpperCAmelCase__ : int = None
class __snake_case ( a , a ):
UpperCAmelCase__ : List[str] = 2
@register_to_config
def __init__( self : List[str] , _snake_case : float = 0.0_2 , _snake_case : float = 100 , _snake_case : float = 1.0_0_7 , _snake_case : float = 80 , _snake_case : float = 0.0_5 , _snake_case : float = 50 , ):
"""simple docstring"""
UpperCAmelCase_ = sigma_max
# setable values
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None # sigma(t_i)
def lowerCamelCase ( self : Any , _snake_case : torch.FloatTensor , _snake_case : Optional[int] = None):
"""simple docstring"""
return sample
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Union[str, torch.device] = None):
"""simple docstring"""
UpperCAmelCase_ = num_inference_steps
UpperCAmelCase_ = np.arange(0 , self.num_inference_steps)[::-1].copy()
UpperCAmelCase_ = torch.from_numpy(__A).to(__A)
UpperCAmelCase_ = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
UpperCAmelCase_ = torch.tensor(__A , dtype=torch.floataa , device=__A)
def lowerCamelCase ( self : List[Any] , _snake_case : torch.FloatTensor , _snake_case : float , _snake_case : Optional[torch.Generator] = None):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase_ = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1)
else:
UpperCAmelCase_ = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase_ = self.config.s_noise * randn_tensor(sample.shape , generator=__A).to(sample.device)
UpperCAmelCase_ = sigma + gamma * sigma
UpperCAmelCase_ = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def lowerCamelCase ( self : Optional[Any] , _snake_case : torch.FloatTensor , _snake_case : float , _snake_case : float , _snake_case : torch.FloatTensor , _snake_case : bool = True , ):
"""simple docstring"""
UpperCAmelCase_ = sample_hat + sigma_hat * model_output
UpperCAmelCase_ = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase_ = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__A , derivative=__A , pred_original_sample=__A)
def lowerCamelCase ( self : Dict , _snake_case : torch.FloatTensor , _snake_case : float , _snake_case : float , _snake_case : torch.FloatTensor , _snake_case : torch.FloatTensor , _snake_case : torch.FloatTensor , _snake_case : bool = True , ):
"""simple docstring"""
UpperCAmelCase_ = sample_prev + sigma_prev * model_output
UpperCAmelCase_ = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase_ = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__A , derivative=__A , pred_original_sample=__A)
def lowerCamelCase ( self : Any , _snake_case : List[Any] , _snake_case : int , _snake_case : int):
"""simple docstring"""
raise NotImplementedError()
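# Note on set_timesteps above: the stored schedule raises
# (sigma_min**2 / sigma_max**2) to the power i / (N - 1) over the reversed
# timestep indices, i.e. a geometric interpolation between sigma_max**2
# (at i = 0) and sigma_min**2 (at i = N - 1).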
| 358 |
import sys
def A (__A : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = len(__A )
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
for chain_length in range(2 , __A ):
for a in range(1 , n - chain_length + 1 ):
UpperCAmelCase_ = a + chain_length - 1
UpperCAmelCase_ = sys.maxsize
for c in range(__A , __A ):
UpperCAmelCase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase_ = cost
UpperCAmelCase_ = c
return matrix, sol
def A (__A : Any , __A : Dict , __A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if i == j:
print('''A''' + str(__A ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optimal_solution(__A , __A , optimal_solution[i][j] )
print_optimal_solution(__A , optimal_solution[i][j] + 1 , __A )
print(''')''' , end=''' ''' )
def A () -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = [30, 35, 15, 5, 10, 20, 25]
UpperCAmelCase_ = len(__A )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCAmelCase_ , UpperCAmelCase_ = matrix_chain_order(__A )
print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
print_optimal_solution(__A , 1 , n - 1 )
if __name__ == "__main__":
main()
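# Worked example: the dims [30, 35, 15, 5, 10, 20, 25] describe matrices
# A1 (30x35) through A6 (20x25); the classic optimum is 15125 scalar
# multiplications with parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ),
# which is what main() prints.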
| 7 | 0 |
"""simple docstring"""
def A (__A : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = int(__A )
if n_element < 1:
UpperCAmelCase_ = ValueError('''n_element should be a positive number''' )
raise my_error
UpperCAmelCase_ = [1]
UpperCAmelCase_ = (0, 0, 0)
UpperCAmelCase_ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
snake_case_ : int = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
snake_case_ : List[str] = hamming(int(n))
print("-----------------------------------------------------")
print(f"The list with nth numbers is: {hamming_numbers}")
print("-----------------------------------------------------")
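# Worked example: the first ten Hamming numbers are 1, 2, 3, 4, 5, 6, 8, 9,
# 10, 12. Each while-loop above advances one pointer per factor so that the
# appended value is the least of 2*h[i], 3*h[j] and 5*h[k] that exceeds the
# current tail of the list.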
| 359 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set assuming you run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (even though we don't have a training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in the conversion script (despite not being used in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , )
is not None
):
UpperCAmelCase_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(__A )
UpperCAmelCase_ = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A )
and issubclass(__A , PretrainedConfig )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(__A )
if len(__A ) > 0:
UpperCAmelCase_ = unused_attributes
if len(__A ) > 0:
UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
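# Illustrative sketch (not part of the original script): the multi-line branch of
# the attribute check relies on the regex above to catch `getattr` calls that span
# several lines. A tiny demo with a hypothetical source string and attribute name:
#
# import re
# demo_source = 'getattr(\n    self.config,\n    "hidden_size",\n)'
# demo_pattern = r"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"hidden_size\""
# assert re.search(demo_pattern, demo_source) is not None  # matches despite the line breaks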
| 7 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __snake_case ( ctypes.Structure ):
UpperCAmelCase__ : Dict = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def hide_cursor() -> None:
"""simple docstring"""
if os.name == "nt":
UpperCAmelCase_ = CursorInfo()
UpperCAmelCase_ = ctypes.windll.kernel32.GetStdHandle(-11 )
ctypes.windll.kernel32.GetConsoleCursorInfo(_a , ctypes.byref(_a ) )
UpperCAmelCase_ = False
ctypes.windll.kernel32.SetConsoleCursorInfo(_a , ctypes.byref(_a ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def show_cursor() -> None:
"""simple docstring"""
if os.name == "nt":
UpperCAmelCase_ = CursorInfo()
UpperCAmelCase_ = ctypes.windll.kernel32.GetStdHandle(-11 )
ctypes.windll.kernel32.GetConsoleCursorInfo(_a , ctypes.byref(_a ) )
UpperCAmelCase_ = True
ctypes.windll.kernel32.SetConsoleCursorInfo(_a , ctypes.byref(_a ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
@contextmanager
def A () -> Any:
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
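# Usage sketch (assumption: run in an interactive terminal; `A` is, after the
# definitions above, the context manager defined last, so the cursor is hidden for
# the duration of the `with` block and restored afterwards, even on error):
#
# import time
# with A():
#     time.sleep(2)  # cursor is hidden while this runs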
| 360 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = FlaxAutoencoderKL
@property
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = 4
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = jax.random.PRNGKey(0)
UpperCAmelCase_ = jax.random.uniform(_snake_case , ((batch_size, num_channels) + sizes))
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
| 7 | 0 |
def prefix_function(__A : str ) -> list:
"""simple docstring"""
UpperCAmelCase_ = [0] * len(__lowerCAmelCase )
for i in range(1 , len(__lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
UpperCAmelCase_ = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
UpperCAmelCase_ = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
UpperCAmelCase_ = j
return prefix_result
def A (__A : str ) -> int:
"""simple docstring"""
return max(prefix_function(__lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
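# Worked example (comment sketch): `prefix_function` computes the KMP failure
# table, and the second function returns its maximum, i.e. the length of the
# longest proper prefix that is also a suffix of some prefix of the input:
#
# prefix_function("aabcdaabc")  ->  [0, 1, 0, 0, 0, 1, 2, 3, 4]
# A("aabcdaabc")                ->  4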
| 361 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case_ : List[str] = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class __snake_case ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = TOKEN
HfFolder.save_token(_snake_case)
@classmethod
def lowerCamelCase ( cls : List[str]):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''')
except HTTPError:
pass
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
config.push_to_hub('''test-config''' , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_snake_case , repo_id='''test-config''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_snake_case , repo_id='''valid_org/test-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
CustomConfig.register_for_auto_class()
UpperCAmelCase_ = CustomConfig(attribute=42)
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''})
UpperCAmelCase_ = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case)
# Can't make an isinstance check because the new_config comes from the dynamically loaded CustomConfig class
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''')
self.assertEqual(new_config.attribute , 42)
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
UpperCAmelCase_ = c.n_embd + 1 # int
UpperCAmelCase_ = c.resid_pdrop + 1.0 # float
UpperCAmelCase_ = not c.scale_attn_weights # bool
UpperCAmelCase_ = c.summary_type + '''foo''' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""")
self.assertEqual(_snake_case , c.n_embd , '''mismatch for key: n_embd''')
self.assertEqual(_snake_case , c.resid_pdrop , '''mismatch for key: resid_pdrop''')
self.assertEqual(_snake_case , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''')
self.assertEqual(_snake_case , c.summary_type , '''mismatch for key: summary_type''')
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = PretrainedConfig()
UpperCAmelCase_ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_snake_case , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''])
UpperCAmelCase_ = [key for key, value in config_common_kwargs.items() if value == getattr(_snake_case , _snake_case)]
if len(_snake_case) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
F""" {", ".join(_snake_case)}.""")
def lowerCamelCase ( self : str):
"""simple docstring"""
with self.assertRaises(_snake_case):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''')
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''')
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = mock.Mock()
UpperCAmelCase_ = 500
UpperCAmelCase_ = {}
UpperCAmelCase_ = HTTPError
UpperCAmelCase_ = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_snake_case) as mock_head:
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# This check ensures we did call the fake head request
mock_head.assert_called()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''')
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = AutoConfig.from_pretrained('''bert-base-cased''')
UpperCAmelCase_ = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_snake_case)
UpperCAmelCase_ = 2
json.dump(configuration.to_dict() , open(os.path.join(_snake_case , '''config.4.0.0.json''') , '''w'''))
# This should pick the new configuration file as the version of Transformers is > 4.0.0
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
self.assertEqual(new_configuration.hidden_size , 2)
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
UpperCAmelCase_ = ['''config.42.0.0.json''']
UpperCAmelCase_ = 768
configuration.save_pretrained(_snake_case)
shutil.move(os.path.join(_snake_case , '''config.4.0.0.json''') , os.path.join(_snake_case , '''config.42.0.0.json'''))
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
self.assertEqual(new_configuration.hidden_size , 768)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
UpperCAmelCase_ = '''v4.0.0'''
UpperCAmelCase_ , UpperCAmelCase_ = new_transformers.models.auto.AutoConfig.from_pretrained(
_snake_case , return_unused_kwargs=_snake_case)
self.assertEqual(new_configuration.hidden_size , 2)
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_snake_case , {})
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
UpperCAmelCase_ = '''v3.0.0'''
UpperCAmelCase_ = old_transformers.models.auto.AutoConfig.from_pretrained(_snake_case)
self.assertEqual(old_configuration.hidden_size , 768)
| 7 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
snake_case_ : Optional[Any] = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
snake_case_ : int = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
snake_case_ : List[Any] = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each prediction
should be a string with tokens separated by spaces.
references: list of references, one per prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase ( self : str):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def lowerCamelCase ( self : Any , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : Union[str, Any]=None , _snake_case : Union[str, Any]=None , _snake_case : Optional[int]=None , _snake_case : Union[str, Any]=None , _snake_case : Any="auto" , _snake_case : List[Any]=-1 , _snake_case : List[str]=0.9 , _snake_case : Any=5 , _snake_case : List[Any]=500 , _snake_case : int="gpt2-large" , _snake_case : Optional[Any]=-1 , _snake_case : Any=1024 , _snake_case : Any=25 , _snake_case : Dict=5 , _snake_case : Any=True , _snake_case : List[str]=25 , ):
"""simple docstring"""
UpperCAmelCase_ = compute_mauve(
p_text=lowerCAmelCase__ , q_text=lowerCAmelCase__ , p_features=lowerCAmelCase__ , q_features=lowerCAmelCase__ , p_tokens=lowerCAmelCase__ , q_tokens=lowerCAmelCase__ , num_buckets=lowerCAmelCase__ , pca_max_data=lowerCAmelCase__ , kmeans_explained_var=lowerCAmelCase__ , kmeans_num_redo=lowerCAmelCase__ , kmeans_max_iter=lowerCAmelCase__ , featurize_model_name=lowerCAmelCase__ , device_id=lowerCAmelCase__ , max_text_length=lowerCAmelCase__ , divergence_curve_discretization_size=lowerCAmelCase__ , mauve_scaling_factor=lowerCAmelCase__ , verbose=lowerCAmelCase__ , seed=lowerCAmelCase__ , )
return out
| 362 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
snake_case_ : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1)
snake_case_ : str = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __snake_case :
UpperCAmelCase__ : int
UpperCAmelCase__ : Node | None
class __snake_case :
def __init__( self : Optional[int] , _snake_case : Iterable[int]):
"""simple docstring"""
UpperCAmelCase_ = None
for i in sorted(_snake_case , reverse=_snake_case):
UpperCAmelCase_ = Node(_snake_case , self.head)
def __iter__( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.head
while node:
yield node.data
UpperCAmelCase_ = node.next_node
def __len__( self : int):
"""simple docstring"""
return sum(1 for _ in self)
def __str__( self : Optional[Any]):
"""simple docstring"""
return " -> ".join([str(_snake_case) for node in self])
def A (__A : SortedLinkedList , __A : SortedLinkedList ) -> SortedLinkedList:
"""simple docstring"""
return SortedLinkedList(list(__A ) + list(__A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ : Union[str, Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
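# Expected output (comment sketch, assuming the constructor sorts with
# reverse=True as in the upstream snippet, which yields an ascending list):
#
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10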
| 7 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : Tuple = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class __snake_case ( lowerCAmelCase__ ):
UpperCAmelCase__ : List[str] = "audio-spectrogram-transformer"
def __init__( self : Dict , _snake_case : int=768 , _snake_case : Tuple=12 , _snake_case : Optional[int]=12 , _snake_case : str=3072 , _snake_case : Optional[Any]="gelu" , _snake_case : Optional[int]=0.0 , _snake_case : Tuple=0.0 , _snake_case : Optional[Any]=0.0_2 , _snake_case : Optional[Any]=1e-12 , _snake_case : Tuple=16 , _snake_case : Optional[Any]=True , _snake_case : str=10 , _snake_case : int=10 , _snake_case : str=1024 , _snake_case : str=128 , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**a__)
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = frequency_stride
UpperCAmelCase_ = time_stride
UpperCAmelCase_ = max_length
UpperCAmelCase_ = num_mel_bins
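# Sketch of how these values interact downstream (an assumption mirroring the AST
# modeling code, not anything defined in this file): the (max_length, num_mel_bins)
# spectrogram is cut into patch_size x patch_size patches using the two strides,
# giving a patch grid of roughly
#
# frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # (128 - 16) // 10 + 1 = 12
# time_out = (max_length - patch_size) // time_stride + 1              # (1024 - 16) // 10 + 1 = 101
#
# i.e. 12 * 101 = 1212 patches for the defaults above.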
| 363 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __snake_case :
def __init__( self : int , _snake_case : List[Any] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = question_encoder
UpperCAmelCase_ = generator
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int]):
"""simple docstring"""
if os.path.isfile(_snake_case):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
os.makedirs(_snake_case , exist_ok=_snake_case)
UpperCAmelCase_ = os.path.join(_snake_case , '''question_encoder_tokenizer''')
UpperCAmelCase_ = os.path.join(_snake_case , '''generator_tokenizer''')
self.question_encoder.save_pretrained(_snake_case)
self.generator.save_pretrained(_snake_case)
@classmethod
def lowerCamelCase ( cls : Optional[Any] , _snake_case : Optional[Any] , **_snake_case : Optional[int]):
"""simple docstring"""
from ..auto.tokenization_auto import AutoTokenizer
UpperCAmelCase_ = kwargs.pop('''config''' , _snake_case)
if config is None:
UpperCAmelCase_ = RagConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''')
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.generator , subfolder='''generator_tokenizer''')
return cls(question_encoder=_snake_case , generator=_snake_case)
def __call__( self : List[Any] , *_snake_case : List[str] , **_snake_case : List[Any]):
"""simple docstring"""
return self.current_tokenizer(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[Any] , *_snake_case : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return self.generator.batch_decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : str , *_snake_case : Optional[int] , **_snake_case : Any):
"""simple docstring"""
return self.generator.decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.generator
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[List[str]] = None , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : str = "longest" , _snake_case : str = None , _snake_case : bool = True , **_snake_case : Optional[int] , ):
"""simple docstring"""
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , _snake_case , )
if max_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , max_length=_snake_case , padding=_snake_case , truncation=_snake_case , **_snake_case , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
text_target=_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , **_snake_case , )
UpperCAmelCase_ = labels['''input_ids''']
return model_inputs
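# Usage sketch (comment only; method names above are obfuscated — in upstream
# transformers this class is `RagTokenizer`, with `from_pretrained`, `__call__`
# and `batch_decode` in place of the renamed methods; the checkpoint name is a
# real but purely illustrative example):
#
# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
# inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
# text = tokenizer.batch_decode(inputs["input_ids"])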
| 7 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self : Any , _snake_case : Optional[int] , _snake_case : Any=3 , _snake_case : Any=32 , _snake_case : Union[str, Any]=3 , _snake_case : int=10 , _snake_case : Union[str, Any]=[10, 20, 30, 40] , _snake_case : Tuple=[1, 1, 2, 1] , _snake_case : Optional[int]=True , _snake_case : Any=True , _snake_case : Dict="relu" , _snake_case : int=3 , _snake_case : str=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(lowerCAmelCase_)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = self.get_config()
return config, pixel_values
def lowerCamelCase ( self : str):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase ( self : List[Any] , _snake_case : int , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModel(config=lowerCAmelCase_)
UpperCAmelCase_ = model(lowerCAmelCase_)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : List[str] , _snake_case : Tuple , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = FlaxRegNetForImageClassification(config=lowerCAmelCase_)
UpperCAmelCase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase__ : Dict = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Any = False
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
return
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_)
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
def lowerCamelCase ( self : str):
"""simple docstring"""
pass
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase_)
UpperCAmelCase_ = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
def check_hidden_states_output(_snake_case : Any , _snake_case : Dict , _snake_case : Tuple):
UpperCAmelCase_ = model_class(lowerCAmelCase_)
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_))
UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_) , expected_num_stages + 1)
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
UpperCAmelCase_ = model_class(lowerCAmelCase_)
@jax.jit
def model_jitted(_snake_case : Optional[int] , **_snake_case : Any):
return model(pixel_values=lowerCAmelCase_ , **lowerCAmelCase_)
with self.subTest('''JIT Enabled'''):
UpperCAmelCase_ = model_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def A () -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : int):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None
@slow
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase_ , return_tensors='''np''')
UpperCAmelCase_ = model(**lowerCAmelCase_)
# verify the logits
UpperCAmelCase_ = (1, 1000)
self.assertEqual(outputs.logits.shape , lowerCAmelCase_)
UpperCAmelCase_ = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4))
| 364 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
@slow
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
| 7 | 0 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
snake_case_ : Dict = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : str = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : List[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Optional[Any] = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (although we don't have a training script for these models yet)
# `norm` used in the conversion script (despite not being used in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A (__A : Dict , __A : List[str] , __A : Dict , __A : Dict ) -> int:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , lowerCAmelCase__ , )
is not None
):
UpperCAmelCase_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
UpperCAmelCase_ = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A (__A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(lowerCAmelCase__ )
UpperCAmelCase_ = os.path.dirname(lowerCAmelCase__ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) for fn in os.listdir(lowerCAmelCase__ ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
unused_attributes.append(attributes[0] )
return sorted(lowerCAmelCase__ )
def A () -> int:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(lowerCAmelCase__ )
and issubclass(lowerCAmelCase__ , PretrainedConfig )
and inspect.getmodule(lowerCAmelCase__ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_ = unused_attributes
if len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_ = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(lowerCAmelCase__ )
if __name__ == "__main__":
check_config_attributes()
| 365 |
from maths.prime_factors import prime_factors
def A (__A : int ) -> int:
"""simple docstring"""
if not isinstance(__A , __A ):
UpperCAmelCase_ = F"""Input value of [number={number}] must be an integer"""
raise TypeError(__A )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(__A ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
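# Worked examples (comment sketch) — note this returns the parity sign of the
# factor list only; the classical Moebius function additionally returns 0 for
# non-square-free inputs (upstream pairs this with an `is_square_free` check):
#
# prime_factors(15) -> [3, 5]        => returns 1   (matches mu(15) = 1)
# prime_factors(7)  -> [7]           => returns -1  (matches mu(7) = -1)
# prime_factors(24) -> [2, 2, 2, 3]  => returns 1, whereas mu(24) = 0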
| 7 | 0 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def A (__A : List[str] , __A : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase_ = model_name.find('''patch''' )
UpperCAmelCase_ = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
UpperCAmelCase_ = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE__ , num_frames=SCREAMING_SNAKE_CASE__ )
if "large" in model_name:
UpperCAmelCase_ = 768
UpperCAmelCase_ = 3072
UpperCAmelCase_ = 12
UpperCAmelCase_ = 1024
UpperCAmelCase_ = 4096
UpperCAmelCase_ = 16
UpperCAmelCase_ = 24
UpperCAmelCase_ = 768
UpperCAmelCase_ = 3072
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ = 336
UpperCAmelCase_ = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "large" in model_name:
UpperCAmelCase_ = 768
return config
def A (__A : Optional[Any] ) -> List[str]:
"""simple docstring"""
if name == "token_embedding.weight":
UpperCAmelCase_ = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
UpperCAmelCase_ = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
UpperCAmelCase_ = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
UpperCAmelCase_ = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
UpperCAmelCase_ = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
UpperCAmelCase_ = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
UpperCAmelCase_ = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase_ = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
UpperCAmelCase_ = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase_ = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
UpperCAmelCase_ = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
UpperCAmelCase_ = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
UpperCAmelCase_ = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
UpperCAmelCase_ = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
UpperCAmelCase_ = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
UpperCAmelCase_ = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
UpperCAmelCase_ = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase_ = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
UpperCAmelCase_ = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase_ = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
UpperCAmelCase_ = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
UpperCAmelCase_ = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
def A (__A : str , __A : Any ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "attn.in_proj" in key:
UpperCAmelCase_ = key.split('''.''' )
if key.startswith('''visual''' ):
UpperCAmelCase_ = key_split[3]
UpperCAmelCase_ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase_ = val[
:dim, :
]
UpperCAmelCase_ = val[
dim : dim * 2, :
]
UpperCAmelCase_ = val[
-dim:, :
]
else:
UpperCAmelCase_ = val[
:dim
]
UpperCAmelCase_ = val[
dim : dim * 2
]
UpperCAmelCase_ = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase_ = val[
:dim, :
]
UpperCAmelCase_ = val[
dim : dim * 2, :
]
UpperCAmelCase_ = val[
-dim:, :
]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[
dim : dim * 2
]
UpperCAmelCase_ = val[-dim:]
elif key.startswith('''mit''' ):
UpperCAmelCase_ = key_split[2]
UpperCAmelCase_ = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
else:
UpperCAmelCase_ = key_split[2]
UpperCAmelCase_ = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[
dim : dim * 2, :
]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[
dim : dim * 2
]
UpperCAmelCase_ = val[-dim:]
else:
UpperCAmelCase_ = rename_key(SCREAMING_SNAKE_CASE__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase_ = val.T
UpperCAmelCase_ = val
return orig_state_dict
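# Sketch of the split performed above (comment only): CLIP stores the attention
# projections as one fused `attn.in_proj_weight` of shape (3 * dim, dim); the
# three row blocks become the separate q/k/v projections, e.g. for dim = 768:
#
# fused = torch.randn(3 * 768, 768)  # hypothetical tensor
# q_w, k_w, v_w = fused[:768], fused[768:1536], fused[-768:]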
def A (__A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if num_frames == 8:
UpperCAmelCase_ = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
UpperCAmelCase_ = '''eating_spaghetti.npy'''
elif num_frames == 32:
UpperCAmelCase_ = '''eating_spaghetti_32_frames.npy'''
UpperCAmelCase_ = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' , )
UpperCAmelCase_ = np.load(SCREAMING_SNAKE_CASE__ )
return list(SCREAMING_SNAKE_CASE__ )
def A (__A : List[Any] , __A : List[Any]=None , __A : List[Any]=False ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()
    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]
    state_dict = convert_state_dict(state_dict, config)
    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()
    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)
    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )
    print("Shape of pixel values:", inputs.pixel_values.shape)
    with torch.no_grad():
        outputs = model(**inputs)
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase_ = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase_ = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase_ = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase_ = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase_ = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase_ = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase_ = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase_ = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase_ = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase_ = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase_ = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
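    # Example invocation (hedged; the script filename and output path are illustrative):
    #   python convert_x_clip_original_pytorch_to_hf.py \
    #       --model_name xclip-base-patch32 \
    #       --pytorch_dump_folder_path ./xclip-base-patch32 \
    #       --push_to_hub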
| 366 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
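# Hedged standalone sketch (mirrors the tests above, outside unittest):
# benchmark a single tiny checkpoint; multi_process=False keeps everything
# in-process so it also runs where spawning subprocesses is awkward.
def _demo_benchmark() -> None:
    args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    results = PyTorchBenchmark(args).run()
    print(results.time_inference_result)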
| 7 | 0 |
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited) -> bool:
        # a cell can be visited if it lies inside the grid, is unvisited, and holds a 1
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited):
        # Checking all 8 neighbouring cells (depth-first search)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
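# Hedged usage sketch (the grid is illustrative, not from the original file):
# under 8-connectivity the two diagonal clusters of 1s below form two islands.
if __name__ == "__main__":
    example_grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
    ]
    g = Graph(4, 5, example_grid)
    print(g.count_islands())  # -> 2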
| 367 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
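# Hedged standalone illustration (mirrors the substitution table above) of how
# one PyTorch parameter name lands in the TF checkpoint naming scheme.
def _demo_variable_naming(name: str = "encoder.layer.0.attention.self.query.weight") -> str:
    for patt, repl in (("layer.", "layer_"), (".", "/"), ("weight", "kernel")):
        name = name.replace(patt, repl)
    return f"bert/{name}"  # -> "bert/encoder/layer_0/attention/self/query/kernel"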
| 7 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
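# Hedged sketch (mirrors test_processor above; the default-constructed
# sub-processors and shapes are illustrative): the TVLT processor routes audio
# to the feature extractor and frames to the image processor, merging both
# results into one batch dict.
def _demo_tvlt_processor() -> None:
    processor = TvltProcessor(
        image_processor=TvltImageProcessor(), feature_extractor=TvltFeatureExtractor()
    )
    batch = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
    print(sorted(batch.keys()))  # audio/pixel values and their masks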
| 368 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
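# Hedged standalone aside on the JIT check above: jax.jit compiles a traced
# version of a function, and its outputs must match eager execution.
def _demo_jit_equivalence() -> None:
    @jax.jit
    def scale(x):
        return 2.0 * x

    x = jnp.ones((2, 3))
    assert jnp.allclose(scale(x), 2.0 * x)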
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 7 | 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # to convert "PCM-byte" to "WAV-byte", the sampling rate must be known
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # if we already have the PCM bytes, use them directly instead of re-reading the file
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
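# Hedged round-trip sketch (requires `soundfile`; the tone parameters are
# illustrative): encode an in-memory waveform to wav bytes, then decode it back.
def _demo_audio_roundtrip() -> None:
    feature = Audio(sampling_rate=16_000)
    wave = np.sin(2 * np.pi * 440 * np.arange(16_000) / 16_000).astype(np.float32)
    encoded = feature.encode_example({"array": wave, "sampling_rate": 16_000})
    decoded = feature.decode_example(encoded)
    print(decoded["sampling_rate"], decoded["array"].shape)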
| 369 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 7 | 0 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 370 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # build a default scheduler only when none was supplied
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
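# Hedged sketch (mirrors test_switch above): DPM-family schedulers share a
# compatible config, so `from_config` lets you hop between implementations
# without rebuilding anything else.
def _demo_scheduler_switch() -> None:
    scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
    scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
    scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
    print(type(scheduler).__name__)  # DPMSolverSinglestepScheduler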
| 7 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class __snake_case :
def __init__( self : List[str] , _snake_case : str = None , _snake_case : uuid.UUID = None , _snake_case : str=None , _snake_case : Optional[Any]=None):
"""simple docstring"""
if not conversation_id:
UpperCAmelCase_ = uuid.uuida()
if past_user_inputs is None:
UpperCAmelCase_ = []
if generated_responses is None:
UpperCAmelCase_ = []
UpperCAmelCase_ = conversation_id
UpperCAmelCase_ = past_user_inputs
UpperCAmelCase_ = generated_responses
UpperCAmelCase_ = text
def __eq__( self : List[str] , _snake_case : Union[str, Any]):
"""simple docstring"""
if not isinstance(_A , _A):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCamelCase ( self : List[Any] , _snake_case : str , _snake_case : bool = False):
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""")
UpperCAmelCase_ = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""")
else:
UpperCAmelCase_ = text
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
UpperCAmelCase_ = None
def lowerCamelCase ( self : Union[str, Any] , _snake_case : str):
"""simple docstring"""
self.generated_responses.append(_A)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
UpperCAmelCase_ = 'user' if is_user else 'bot'
output += F"""{name} >> {text} \n"""
return output
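# --- Illustrative sketch (not part of the original file) ---
# A conversation object like the one above is driven turn by turn; here via
# the public `transformers.Conversation` API (available in the transformers
# versions this file targets):
from transformers import Conversation

demo_conv = Conversation("Hi, can you recommend a movie?")
demo_conv.mark_processed()
demo_conv.append_response("Sure - how about Blade Runner?")
demo_conv.add_user_input("Something newer, please.")
print(demo_conv)  # __repr__ interleaves "user >>" and "bot >>" turns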
@add_end_docstrings(
snake_case_ , r'''\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ''' , )
class __snake_case ( snake_case_ ):
def __init__( self : Union[str, Any] , *_snake_case : Dict , **_snake_case : Any):
"""simple docstring"""
super().__init__(*_A , **_A)
if self.tokenizer.pad_token_id is None:
UpperCAmelCase_ = self.tokenizer.eos_token
def lowerCamelCase ( self : int , _snake_case : Optional[int]=None , _snake_case : Optional[int]=None , _snake_case : List[Any]=None , **_snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
if min_length_for_response is not None:
UpperCAmelCase_ = min_length_for_response
if minimum_tokens is not None:
UpperCAmelCase_ = minimum_tokens
if "max_length" in generate_kwargs:
UpperCAmelCase_ = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
UpperCAmelCase_ = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_A)
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[Any] , _snake_case : Union[Conversation, List[Conversation]] , _snake_case : List[Any]=0 , **_snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = super().__call__(_A , num_workers=_A , **_A)
if isinstance(_A , _A) and len(_A) == 1:
return outputs[0]
return outputs
def lowerCamelCase ( self : Optional[int] , _snake_case : Conversation , _snake_case : str=32):
"""simple docstring"""
if not isinstance(_A , _A):
            raise ValueError('''ConversationalPipeline expects a Conversation as input''')
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. """
'''Add user inputs with the conversation\'s `add_user_input` method''')
if hasattr(self.tokenizer , '''_build_conversation_input_ids'''):
UpperCAmelCase_ = self.tokenizer._build_conversation_input_ids(_A)
else:
# If the tokenizer cannot handle conversations, we default to only the old version
UpperCAmelCase_ = self._legacy_parse_and_tokenize(_A)
if self.framework == "pt":
UpperCAmelCase_ = torch.LongTensor([input_ids])
elif self.framework == "tf":
UpperCAmelCase_ = tf.constant([input_ids])
return {"input_ids": input_ids, "conversation": conversation}
def lowerCamelCase ( self : List[Any] , _snake_case : str , _snake_case : Union[str, Any]=10 , **_snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = generate_kwargs.get('''max_length''' , self.model.config.max_length)
UpperCAmelCase_ = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""")
UpperCAmelCase_ = max_length - minimum_tokens
UpperCAmelCase_ = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
UpperCAmelCase_ = model_inputs['attention_mask'][:, -trim:]
UpperCAmelCase_ = model_inputs.pop('''conversation''')
UpperCAmelCase_ = max_length
UpperCAmelCase_ = self.model.generate(**_A , **_A)
if self.model.config.is_encoder_decoder:
UpperCAmelCase_ = 1
else:
UpperCAmelCase_ = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCamelCase ( self : List[Any] , _snake_case : List[str] , _snake_case : str=True):
"""simple docstring"""
UpperCAmelCase_ = model_outputs['output_ids']
UpperCAmelCase_ = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
UpperCAmelCase_ = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(_A)
return conversation
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Conversation):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.eos_token_id
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_A , add_special_tokens=_A) + [eos_token_id])
else:
input_ids.extend(self.tokenizer.encode(_A , add_special_tokens=_A))
if len(_A) > self.tokenizer.model_max_length:
UpperCAmelCase_ = input_ids[-self.tokenizer.model_max_length :]
return input_ids
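# --- Illustrative usage sketch (not part of the original file) ---
# The class above backs `pipeline("conversational")`; assuming a
# conversational checkpoint such as "microsoft/DialoGPT-small":
from transformers import Conversation, pipeline

demo_chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
demo_conversation = Conversation("What's the best way to learn Python?")
demo_conversation = demo_chatbot(demo_conversation)
print(demo_conversation.generated_responses[-1])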
| 371 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["DeiTFeatureExtractor"]
snake_case_ : List[str] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
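# --- Illustrative usage sketch (not part of the original __init__) ---
# With the lazy module above in place, the DeiT classes import as usual
# (checkpoint name is the public reference checkpoint):
from PIL import Image
from transformers import DeiTForImageClassification, DeiTImageProcessor

demo_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-patch16-224")
demo_model = DeiTForImageClassification.from_pretrained("facebook/deit-base-patch16-224")
demo_inputs = demo_processor(images=Image.new("RGB", (224, 224)), return_tensors="pt")
demo_logits = demo_model(**demo_inputs).logits  # one score per ImageNet class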
| 7 | 0 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''')
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''xlm-roberta-base''')
UpperCAmelCase_ = """The dog is cute and lives in the garden house"""
UpperCAmelCase_ = jnp.array([tokenizer.encode(_snake_case)])
UpperCAmelCase_ = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = jnp.array(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
UpperCAmelCase_ = model(_snake_case)["""last_hidden_state"""]
self.assertEqual(output.shape , _snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
| 350 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
snake_case_ : Dict = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
snake_case_ : List[str] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
snake_case_ : List[Any] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
if version.parse(scb.__version__) < version.parse('''1.4.12'''):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''),
}) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , ):
"""simple docstring"""
UpperCAmelCase_ = len(references[0])
if any(len(_snake_case) != references_per_prediction for refs in references):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
UpperCAmelCase_ = [[refs[i] for refs in references] for i in range(_snake_case)]
UpperCAmelCase_ = TER(
normalized=_snake_case , no_punct=_snake_case , asian_support=_snake_case , case_sensitive=_snake_case , )
UpperCAmelCase_ = sb_ter.corpus_score(_snake_case , _snake_case)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 7 | 0 |
def A (__A : str ) -> bool:
    """simple docstring"""
    octets = [int(i) for i in __A.split('''.''' ) if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
snake_case_ : Optional[Any] = input().strip()
snake_case_ : Optional[int] = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 351 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __snake_case ( unittest.TestCase , a ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = load_tool('''text-to-speech''')
self.tool.setup()
def lowerCamelCase ( self : int):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
def lowerCamelCase ( self : Any):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
| 7 | 0 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
snake_case_ : List[str] = logging.get_logger(__name__)
def A (__A : nn.ModuleList , __A : nn.ModuleList , __A : List[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__A ) == len(__A ), F"""{len(__A )} != {len(__A )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
snake_case_ : Any = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
snake_case_ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def A (__A : List[Any] , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
try:
UpperCAmelCase_ = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
F""" {n_student}""" )
return list(range(__A ) )
def A (__A : int , __A : int ) -> int:
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(__A ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def A (__A : Union[str, PreTrainedModel] , __A : Union[str, Path] = "student" , __A : Union[int, None] = None , __A : Union[int, None] = None , __A : Dict=False , __A : int=None , __A : Union[str, Any]=None , **__A : str , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(__A , __A ):
AutoTokenizer.from_pretrained(__A ).save_pretrained(__A ) # purely for convenience
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(__A ).eval()
else:
assert isinstance(__A , __A ), F"""teacher must be a model or string got type {type(__A )}"""
UpperCAmelCase_ = teacher.config.to_diff_dict()
try:
UpperCAmelCase_ , UpperCAmelCase_ = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase_ = teacher_e
if d is None:
UpperCAmelCase_ = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
UpperCAmelCase_ , UpperCAmelCase_ = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase_ , UpperCAmelCase_ = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase_ = teacher_e
if d is None:
UpperCAmelCase_ = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__A )
# Copy weights
UpperCAmelCase_ = teacher.config_class(**__A )
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_config(__A )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase_ = student.load_state_dict(teacher.state_dict() , strict=__A )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase_ , UpperCAmelCase_ = list(range(__A ) ), list(range(__A ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(__A )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase_ = pick_layers_to_copy(__A , __A )
if d_layers_to_copy is None:
UpperCAmelCase_ = pick_layers_to_copy(__A , __A )
try:
if hasattr(
__A , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __A )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __A )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __A )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __A )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __A )
copy_layers(teacher.decoder.block , student.decoder.block , __A )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
UpperCAmelCase_ = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(__A )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
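# --- Illustrative invocation (not part of the original script; paths and the
# checkpoint name are examples). Via fire, the entry point above becomes a CLI:
#
#   python make_student.py facebook/bart-large-cnn ./student_dir --e 6 --d 3
#
# which, per the LAYERS_TO_COPY table above, copies encoder layers
# [0, 2, 4, 7, 9, 11] and decoder layers [0, 6, 11] of the 12-layer teacher
# into a 6-encoder/3-decoder student.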
| 352 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
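# --- Illustrative usage sketch (not part of the original __init__; the
# checkpoint name is the public UniDiffuser reference checkpoint) ---
import torch
from diffusers import UniDiffuserPipeline

demo_pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16).to("cuda")
# Text-to-image mode shown here; the same pipeline also supports
# image-to-text and unconditional joint image-text generation.
demo_out = demo_pipe(prompt="an elephant under the sea", num_inference_steps=20)
demo_image = demo_out.images[0]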
| 7 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case_ : int = logging.get_logger(__name__)
def A (__A : Any ) -> int:
"""simple docstring"""
if isinstance(__A , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__A , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__A ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class __snake_case ( lowercase_ ):
UpperCAmelCase__ : List[Any] = ["pixel_values"]
def __init__( self : Dict , _snake_case : List[Any] = True , _snake_case : Optional[int] = None , _snake_case : List[Any] = PILImageResampling.BILINEAR , _snake_case : Union[str, Any] = True , _snake_case : str = None , _snake_case : List[str] = True , _snake_case : List[Any] = 1 / 255 , _snake_case : Any = True , _snake_case : Any = None , _snake_case : List[str] = None , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**a__)
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase_ = get_size_dict(a__ , default_to_square=a__)
UpperCAmelCase_ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase_ = get_size_dict(a__ , param_name='''crop_size''')
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase ( self : Tuple , _snake_case : List[str] , _snake_case : int , _snake_case : Dict = PILImageResampling.BILINEAR , _snake_case : str = None , **_snake_case : Dict , ):
"""simple docstring"""
UpperCAmelCase_ = get_size_dict(a__ , default_to_square=a__)
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__)
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size['''height'''], size['''width'''])
else:
raise ValueError(F"""Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}""")
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__)
def lowerCamelCase ( self : Any , _snake_case : Dict , _snake_case : Any , _snake_case : Dict = None , **_snake_case : List[Any] , ):
"""simple docstring"""
UpperCAmelCase_ = get_size_dict(a__)
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have \'height\' and \'width\' as keys. Got {size.keys()}""")
return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__)
def lowerCamelCase ( self : Tuple , _snake_case : Dict , _snake_case : str , _snake_case : Tuple = None , **_snake_case : List[Any] , ):
"""simple docstring"""
return rescale(a__ , scale=a__ , data_format=a__ , **a__)
def lowerCamelCase ( self : List[str] , _snake_case : int , _snake_case : List[Any] , _snake_case : Any , _snake_case : List[Any] = None , **_snake_case : int , ):
"""simple docstring"""
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__)
def lowerCamelCase ( self : List[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] = None , _snake_case : Optional[int] = None , _snake_case : Dict = None , _snake_case : str = None , _snake_case : Union[str, Any] = None , _snake_case : Dict = None , _snake_case : Union[str, Any] = None , _snake_case : Any = None , _snake_case : Any = None , _snake_case : str = None , _snake_case : str = ChannelDimension.FIRST , ):
"""simple docstring"""
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(a__)
if do_resize:
UpperCAmelCase_ = self.resize(image=a__ , size=a__ , resample=a__)
if do_center_crop:
UpperCAmelCase_ = self.center_crop(a__ , size=a__)
if do_rescale:
UpperCAmelCase_ = self.rescale(image=a__ , scale=a__)
if do_normalize:
UpperCAmelCase_ = self.normalize(image=a__ , mean=a__ , std=a__)
UpperCAmelCase_ = to_channel_dimension_format(a__ , a__)
return image
def lowerCamelCase ( self : Dict , _snake_case : Tuple , _snake_case : List[Any] = None , _snake_case : Tuple = None , _snake_case : Optional[int] = None , _snake_case : Union[str, Any] = None , _snake_case : Dict = None , _snake_case : List[Any] = None , _snake_case : int = None , _snake_case : List[str] = None , _snake_case : Any = None , _snake_case : Any = None , _snake_case : int = None , _snake_case : Optional[Any] = ChannelDimension.FIRST , **_snake_case : List[Any] , ):
"""simple docstring"""
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(a__ , default_to_square=a__)
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(a__ , param_name='''crop_size''')
if not valid_images(a__):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
UpperCAmelCase_ = make_batched(a__)
UpperCAmelCase_ = [
[
self._preprocess_image(
image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {'''pixel_values''': videos}
return BatchFeature(data=a__ , tensor_type=a__)
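# --- Illustrative usage sketch (not part of the original file) ---
# A video processor like the one above consumes a list of frames; here with
# random uint8 frames and the public VideoMAE checkpoint as an example:
import numpy as np
from transformers import VideoMAEImageProcessor

demo_processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")
demo_video = [np.random.randint(0, 255, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
demo_inputs = demo_processor(demo_video, return_tensors="pt")
print(demo_inputs["pixel_values"].shape)  # torch.Size([1, 16, 3, 224, 224])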
| 353 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __snake_case :
@staticmethod
def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1)
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
] , )
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
@slow
@require_torch
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''')
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
| 7 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Any , _snake_case : List[str] , _snake_case : int):
"""simple docstring"""
return F"""gaussian_noise_s={seed}_shape={"_".join([str(UpperCamelCase_) for s in shape])}.npy"""
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCamelCase ( self : str , _snake_case : str=0 , _snake_case : Tuple=(4, 4, 64, 64) , _snake_case : Optional[int]=False):
"""simple docstring"""
        UpperCAmelCase_ = jnp.bfloat16 if fpaa else jnp.float32
UpperCAmelCase_ = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase_ , UpperCamelCase_)) , dtype=UpperCamelCase_)
return image
def lowerCamelCase ( self : List[str] , _snake_case : List[Any]=False , _snake_case : Dict="CompVis/stable-diffusion-v1-4"):
"""simple docstring"""
        UpperCAmelCase_ = jnp.bfloat16 if fpaa else jnp.float32
UpperCAmelCase_ = '''bf16''' if fpaa else None
UpperCAmelCase_ , UpperCAmelCase_ = FlaxUNetaDConditionModel.from_pretrained(
UpperCamelCase_ , subfolder='''unet''' , dtype=UpperCamelCase_ , revision=UpperCamelCase_)
return model, params
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Dict=0 , _snake_case : Tuple=(4, 77, 768) , _snake_case : Tuple=False):
"""simple docstring"""
        UpperCAmelCase_ = jnp.bfloat16 if fpaa else jnp.float32
UpperCAmelCase_ = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase_ , UpperCamelCase_)) , dtype=UpperCamelCase_)
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
])
def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=UpperCamelCase_)
UpperCAmelCase_ = self.get_latents(UpperCamelCase_ , fpaa=UpperCamelCase_)
UpperCAmelCase_ = self.get_encoder_hidden_states(UpperCamelCase_ , fpaa=UpperCamelCase_)
UpperCAmelCase_ = model.apply(
            {'''params''': params} , UpperCamelCase_ , jnp.array(UpperCamelCase_ , dtype=jnp.int32) , encoder_hidden_states=UpperCamelCase_ , ).sample
assert sample.shape == latents.shape
        UpperCAmelCase_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.float32)
        UpperCAmelCase_ = jnp.array(UpperCamelCase_ , dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
])
def lowerCamelCase ( self : List[str] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=UpperCamelCase_)
UpperCAmelCase_ = self.get_latents(UpperCamelCase_ , shape=(4, 4, 96, 96) , fpaa=UpperCamelCase_)
UpperCAmelCase_ = self.get_encoder_hidden_states(UpperCamelCase_ , shape=(4, 77, 1024) , fpaa=UpperCamelCase_)
UpperCAmelCase_ = model.apply(
            {'''params''': params} , UpperCamelCase_ , jnp.array(UpperCamelCase_ , dtype=jnp.int32) , encoder_hidden_states=UpperCamelCase_ , ).sample
assert sample.shape == latents.shape
        UpperCAmelCase_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.float32)
        UpperCAmelCase_ = jnp.array(UpperCamelCase_ , dtype=jnp.float32)
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-2)
| 354 |
from timeit import timeit
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
number &= number - 1
result += 1
return result
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def A () -> None:
"""simple docstring"""
def do_benchmark(__A : int ) -> None:
UpperCAmelCase_ = '''import __main__ as z'''
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__A ) = }""" )
UpperCAmelCase_ = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=__A )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__A ) = }""" )
UpperCAmelCase_ = timeit(
'''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=__A , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
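# --- Worked example (not part of the original file) ---
# Brian Kernighan's trick clears the lowest set bit on every iteration, so the
# loop count equals the population count. For 25 (0b11001):
#   0b11001 & 0b11000 = 0b11000
#   0b11000 & 0b10111 = 0b10000
#   0b10000 & 0b01111 = 0b00000   -> 3 iterations, i.e. 3 set bits
assert bin(25).count("1") == 3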
| 7 | 0 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : List[str] = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
snake_case_ : Optional[int] = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def A (__A : int , __A : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
UpperCAmelCase_ = int(re.match(R'''.*layer_(\d*).*''' , lowerCamelCase_ )[1] )
layer_number -= 3
return F"""h.{layer_number}.""" + key
def A (__A : List[str] ) -> Union[str, Any]:
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
UpperCAmelCase_ = re.search(R'''[^\d](\d+)$''' , str(lowerCamelCase_ ) )
if bit_search is None:
raise ValueError(F"""`dtype` is not a valid dtype: {dtype}.""" )
UpperCAmelCase_ = int(bit_search.groups()[0] )
return bit_size // 8
def A (__A : Optional[Any] , __A : Tuple , __A : int , __A : Dict , __A : int ) -> List[str]:
"""simple docstring"""
if bloom_config_file == "":
UpperCAmelCase_ = BloomConfig()
else:
UpperCAmelCase_ = BloomConfig.from_json_file(lowerCamelCase_ )
if shard_model:
UpperCAmelCase_ = os.listdir(lowerCamelCase_ )
        UpperCAmelCase_ = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , lowerCamelCase_ ) )
UpperCAmelCase_ = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase_ = 0
UpperCAmelCase_ = None
UpperCAmelCase_ = BloomConfig()
for j, file in enumerate(lowerCamelCase_ ):
print('''Processing file: {}'''.format(lowerCamelCase_ ) )
UpperCAmelCase_ = None
for i in range(lowerCamelCase_ ):
# load all TP files
UpperCAmelCase_ = file.replace('''model_00''' , F"""model_0{i}""" )
UpperCAmelCase_ = torch.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ = list(temp.keys() )
for key in keys:
UpperCAmelCase_ = temp.pop(lowerCamelCase_ )
if tensors is None:
UpperCAmelCase_ = temp
else:
for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
UpperCAmelCase_ = torch.cat([tensors[key], temp[key]] , dim=lowerCamelCase_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ = tensors[key] / pretraining_tp
torch.save(
lowerCamelCase_ , os.path.join(
lowerCamelCase_ , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(lowerCamelCase_ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase_ = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase_ = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(lowerCamelCase_ ) ).zfill(5 ) )
UpperCAmelCase_ = BloomConfig()
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ = total_size
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(lowerCamelCase_ , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + '''\n'''
f.write(lowerCamelCase_ )
else:
UpperCAmelCase_ = BloomModel(lowerCamelCase_ )
UpperCAmelCase_ = os.listdir(lowerCamelCase_ )
        UpperCAmelCase_ = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , lowerCamelCase_ ) )
UpperCAmelCase_ = None
for i, file in enumerate(lowerCamelCase_ ):
UpperCAmelCase_ = None
for i in range(lowerCamelCase_ ):
# load all TP files
UpperCAmelCase_ = file.replace('''model_00''' , F"""model_0{i}""" )
UpperCAmelCase_ = torch.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ = list(temp.keys() )
for key in keys:
UpperCAmelCase_ = temp.pop(lowerCamelCase_ )
if tensors is None:
UpperCAmelCase_ = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
UpperCAmelCase_ = torch.cat([tensors[key], temp[key]] , dim=lowerCamelCase_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ = tensors[key] / pretraining_tp
UpperCAmelCase_ = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
assert not other_keys.unexpected_keys, F"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
UpperCAmelCase_ = set(other_keys.missing_keys )
else:
UpperCAmelCase_ = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
UpperCAmelCase_ = model.to(config.torch_dtype )
torch.save(model.state_dict() , lowerCamelCase_ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
snake_case_ : Union[str, Any] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
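# --- Illustrative invocation (not part of the original script; paths are examples) ---
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /ckpts/bloom-megatron \
#       --pytorch_dump_folder_path ./bloom-hf \
#       --shard_model \
#       --pretraining_tp 4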
| 355 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = 10
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_snake_case , _snake_case)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy())
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy())
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case)
np.testing.assert_array_equal(_snake_case , _snake_case)
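# --- Illustrative note (not part of the original file) ---
# The helpers under test pad or trim sequences to a fixed block size and mask
# out the padding; values follow the assertions above:
#   truncate_or_pad([1, 2, 3, 4], 10, 0)            -> [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
#   build_mask(torch.tensor([1, 2, 3, 4, 0, 0]), 0) -> tensor([1, 1, 1, 1, 0, 0])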
| 7 | 0 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def A (__A : str , __A : str = "cpu" , __A : Union[str, None] = None ) -> None:
"""simple docstring"""
UpperCAmelCase_ = torch.load(UpperCAmelCase_ , map_location=UpperCAmelCase_ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(UpperCAmelCase_ , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
UpperCAmelCase_ = v.half()
if save_path is None: # overwrite src_path
UpperCAmelCase_ = src_path
torch.save(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
fire.Fire(convert)
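# --- Illustrative invocation (not part of the original file; paths are examples) ---
# fire exposes the entry point (src_path, map_location="cpu", save_path=None) as a CLI:
#
#   python fp16_conversion.py pytorch_model.bin                                   # overwrite in place
#   python fp16_conversion.py pytorch_model.bin --save_path pytorch_model.fp16.bin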
| 356 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
snake_case_ : Optional[Any] = 128022
snake_case_ : Optional[int] = 128028
@require_sentencepiece
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : List[str] = MaMaaaTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = True
def lowerCamelCase ( self : str):
"""simple docstring"""
super().setUp()
UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case))))
UpperCAmelCase_ = Path(self.tmpdirname)
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file'''])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file'''])
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = '''</s>'''
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''</s>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''<s>''')
self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [2, 3, 4, 5, 6] , )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text , '''This is a test''')
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
    checkpoint_name = '''facebook/m2m100_418M'''
    src_text = [
        '''In my opinion, there are two levels of response from the French government.''',
        '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
    ]
    tgt_text = [
        '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
        '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
@classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''')
        cls.pad_token_id = 1
        return cls
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab) , self.tokenizer.vocab_size)
        self.assertEqual(vocab['''<unk>'''] , 3)
        self.assertIn(self.tokenizer.get_lang_token('''en''') , vocab)
def lowerCamelCase ( self : str):
"""simple docstring"""
        self.tokenizer.src_lang = '''en'''
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids)
def lowerCamelCase ( self : Any):
"""simple docstring"""
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True)
        self.assertEqual(result , expected_french)
        self.assertNotIn(self.tokenizer.eos_token , result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id , original_lang_token_to_id)
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
        self.tokenizer.src_lang = '''en'''
        self.tokenizer.tgt_lang = '''fr'''
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''')
        batch['''decoder_input_ids'''] = shift_tokens_right(
            batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase ( self : str):
"""simple docstring"""
        self.tokenizer.src_lang = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def lowerCamelCase ( self : Dict):
"""simple docstring"""
        self.tokenizer.tgt_lang = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
        inputs = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''')
        self.assertEqual(
            nested_simplify(inputs) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
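# Usage sketch (hedged): MaMaaaTokenizer is this file's alias for the M2M100 tokenizer,
# so the tested behaviour can be reproduced as below; the example sentence is illustrative.
#
#   tok = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   batch = tok("A test", return_tensors="pt")
#   assert batch.input_ids[0][0] == tok.get_lang_id("en")  # inputs are prefixed with the source language code
#   assert batch.input_ids[0][-1] == tok.eos_token_id      # and suffixed with </s>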
| 7 | 0 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext (path : str ) -> str:
    """simple docstring"""
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def run_command_factory (args ):
    """simple docstring"""
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class RunCommand ( BaseTransformersCLICommand ):
    def __init__( self , nlp : Pipeline , reader : PipelineDataFormat):
        """simple docstring"""
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand ( parser : ArgumentParser):
        """simple docstring"""
        run_parser = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''')
        run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''')
        run_parser.add_argument('''--input''' , type=str , help='''Path to the file to use for inference''')
        run_parser.add_argument('''--output''' , type=str , help='''Path to the file that will be used post to write results.''')
        run_parser.add_argument('''--model''' , type=str , help='''Name or path to the model to instantiate.''')
        run_parser.add_argument('''--config''' , type=str , help='''Name or path to the model\'s config to instantiate.''')
        run_parser.add_argument(
            '''--tokenizer''' , type=str , help='''Name of the tokenizer to use. (default: same as the model name)''')
        run_parser.add_argument(
            '''--column''' , type=str , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
        run_parser.add_argument(
            '''--format''' , type=str , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
        run_parser.add_argument(
            '''--device''' , type=int , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''')
        run_parser.set_defaults(func=run_command_factory)
    def run ( self):
        """simple docstring"""
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output , dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""")
        else:
            self._reader.save(outputs)
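# Usage sketch (hedged): this command registers on the `transformers-cli` entry point;
# the task name and file paths below are illustrative.
#
#   transformers-cli run --task text-classification --input data.csv --output out.json \
#       --format infer --column text --overwrite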
| 357 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __snake_case ( Pipeline ):
def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
self.check_model_type(_snake_case)
    def _sanitize_parameters ( self , top_k=None , padding=None , truncation=None , **kwargs):
        """simple docstring"""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['''padding'''] = padding
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image : Union["Image.Image", str] , question : str = None , **kwargs):
        """simple docstring"""
        if isinstance(image , (Image.Image, str)) and isinstance(question , str):
            inputs = {'''image''': image, '''question''': question}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs)
        return results
    def preprocess ( self , inputs , padding=False , truncation=False):
        """simple docstring"""
        image = load_image(inputs['''image'''])
        model_inputs = self.tokenizer(
            inputs['''question'''] , return_tensors=self.framework , padding=padding , truncation=truncation)
        image_features = self.image_processor(images=image , return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs
    def _forward ( self , model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess ( self , model_outputs , top_k=5):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids)]
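# Usage sketch (hedged): the class above backs the "visual-question-answering" task of
# transformers' pipeline factory; the image path, question, and answer are illustrative.
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering")
#   vqa(image="cats.png", question="How many cats are there?", top_k=1)
#   # -> [{"score": ..., "answer": "2"}]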
| 7 | 0 |
def molarity_to_normality (nfactor : int , moles : float , volume : float ) -> int:
    """simple docstring"""
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure (volume : float , moles : float , temperature : float ) -> int:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (volume) ) )
def moles_to_volume (pressure : float , moles : float , temperature : float ) -> int:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature (pressure : float , moles : float , volume : float ) -> int:
    """simple docstring"""
    return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
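# Worked example (hedged, illustrative values): moles_to_pressure applies the ideal gas
# law P = nRT / V with R = 0.0821 L*atm/(mol*K) and rounds to the nearest integer.
#
#   moles_to_pressure(volume=0.82, moles=3, temperature=300)
#   # (3 * 0.0821 * 300) / 0.82 = 90.1... -> 90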
| 358 |
import sys
def matrix_chain_order (array ) -> tuple:
    """simple docstring"""
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optiomal_solution (optimal_solution , i , j ):
    """simple docstring"""
    if i == j:
        print('''A''' + str(i ) , end=''' ''' )
    else:
        print('''(''' , end=''' ''' )
        print_optiomal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optiomal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(''')''' , end=''' ''' )
def main () -> None:
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array )
    print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
    print_optiomal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main()
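# The DP above fills matrix[a][b], the minimal number of scalar multiplications needed to
# compute the product of matrices a..b, via the classic matrix-chain recurrence:
#   matrix[a][b] = min over a <= c < b of matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# Minimal sanity check (hedged, illustrative values): a 10x20 times 20x30 product needs
# 10 * 20 * 30 = 6000 scalar multiplications.
#
#   m, s = matrix_chain_order([10, 20, 30])
#   assert m[1][2] == 6000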
| 7 | 0 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig ( datasets.BuilderConfig ):
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        """simple docstring"""
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        """simple docstring"""
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv ( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self , dl_manager):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files , (str, list, tuple)):
            files = data_files
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files}))
        return splits
    def _cast_table(self , pa_table : pa.Table) -> pa.Table:
        """simple docstring"""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema)
        return pa_table
    def _generate_tables(self , files):
        """simple docstring"""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(F"""Failed to read file '{file}' with error {type(e)}: {e}""")
                raise
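# Usage sketch (hedged): this packaged module is what `datasets.load_dataset` dispatches
# to for CSV inputs, with CsvConfig fields passed through as keyword arguments; the file
# path is illustrative.
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",", skiprows=1)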
| 359 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used (config_class , attributes , default_value , source_strings ) -> bool:
    """simple docstring"""
    attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
                attribute_used = True
# Deal with multi-line cases
            elif (
                re.search(
                    RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , modeling_source , )
                is not None
            ):
                attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
                    attribute_used = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
    attributes_used_in_generation = ['''encoder_no_repeat_ngram_size''']
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
elif attribute.endswith('''_token_id''' ):
                case_allowed = True
# configuration class specific cases
if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
                case_allowed = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def check_config_attributes_being_used (config_class ) -> list:
    """simple docstring"""
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith('''modeling_''' )]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )
    unused_attributes = []
    for config_param, default_value in zip(parameter_names , parameter_defaults ):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
            unused_attributes.append(attributes[0] )
    return sorted(unused_attributes )
def check_config_attributes () -> None:
    """simple docstring"""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) , lambda x : inspect.isclass(x )
                and issubclass(x , PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes ) > 0:
        error = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
        for name, attributes in configs_with_unused_attributes.items():
            error += F"""{name}: {attributes}\n"""
        raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
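# Illustrative check (hedged): the multi-line `getattr` regex used in
# check_attribute_being_used matches usages that span several lines, e.g.:
#
#   import re
#   source = 'value = getattr(\n    self.config, "hidden_size"\n)'
#   pattern = r"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"hidden_size\""
#   assert re.search(pattern, source) is not None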
| 7 | 0 |
class Node :
    def __init__( self , val):
        """simple docstring"""
        self.val = val
        self.left = None
        self.right = None
    def insert ( self , val):
        """simple docstring"""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder (root , res ):
    """simple docstring"""
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort (arr ):
    """simple docstring"""
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
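# Note: tree sort runs in O(n log n) on average and degrades to O(n^2) on already-sorted
# input (the BST becomes a linked list). Because insert overwrites equal keys, duplicates
# are dropped; e.g. (illustrative):
#
#   print(tree_sort([3, 1, 3, 2]))  # -> [1, 2, 3]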
| 360 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __snake_case ( FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input(self):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
| 7 | 0 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class __snake_case ( Dataset ):
    def __init__( self , path="" , prefix="train"):
        """simple docstring"""
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path , story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)
def __len__( self : Union[str, Any]):
"""simple docstring"""
return len(self.documents)
    def __getitem__( self , idx):
        """simple docstring"""
        document_path = self.documents[idx]
        document_name = document_path.split('''/''')[-1]
        with open(document_path , encoding='''utf-8''') as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story (raw_story ) -> tuple:
    """simple docstring"""
    nonempty_lines = list(filter(lambda x : len(x ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith('''@highlight''' ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t : not t.startswith('''@highlight''' ) , lines ) )
    return story_lines, summary_lines
def _add_missing_period (line ) -> str:
    """simple docstring"""
    END_TOKENS = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')''']
    if line.startswith('''@highlight''' ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size (sequence , block_size , pad_token_id ):
    """simple docstring"""
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence
def build_mask (sequence , pad_token_id ):
    """simple docstring"""
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization (story_lines , summary_lines , tokenizer ):
    """simple docstring"""
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids (batch , separator_token_id ):
    """simple docstring"""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
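# Usage sketch (hedged, illustrative token ids): pad a sequence to a block and mask the padding.
#
#   seq = fit_to_block_size([101, 2023, 102], 5, 0)        # -> [101, 2023, 102, 0, 0]
#   mask = build_mask(torch.tensor(seq), 0)                # -> tensor([1, 1, 1, 0, 0])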
| 361 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class __snake_case ( unittest.TestCase ):
@classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''')
except HTTPError:
pass
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
        config.push_to_hub('''test-config''' , use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F"""{USER}/test-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-config''')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id='''test-config''' , push_to_hub=True , use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F"""{USER}/test-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
        config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-config-org''')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-config-org''' , push_to_hub=True , use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''})
        new_config = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=True)
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''')
self.assertEqual(new_config.attribute , 42)
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        c = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1 # int
        resid_pdrop = c.resid_pdrop + 1.0 # float
        scale_attn_weights = not c.scale_attn_weights # bool
        summary_type = c.summary_type + '''foo''' # str
        c.update_from_string(
            F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""")
        self.assertEqual(n_embd , c.n_embd , '''mismatch for key: n_embd''')
        self.assertEqual(resid_pdrop , c.resid_pdrop , '''mismatch for key: resid_pdrop''')
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''')
        self.assertEqual(summary_type , c.summary_type , '''mismatch for key: summary_type''')
def lowerCamelCase ( self : Dict):
"""simple docstring"""
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                '''The following keys are set with the default values in'''
                ''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
                F""" {", ".join(keys_with_defaults)}.""")
def lowerCamelCase ( self : str):
"""simple docstring"""
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''')
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''')
        self.assertIsNotNone(config)
def lowerCamelCase ( self : Any):
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
        # This checks we did call the fake head request
        mock_head.assert_called()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''')
def lowerCamelCase ( self : str):
"""simple docstring"""
        configuration = AutoConfig.from_pretrained('''bert-base-cased''')
        configuration.configuration_files = ['''config.4.0.0.json''']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , '''config.4.0.0.json''') , '''w'''))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size , 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['''config.42.0.0.json''']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir , '''config.4.0.0.json''') , os.path.join(tmp_dir , '''config.42.0.0.json'''))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size , 768)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
        repo = '''hf-internal-testing/test-two-configs'''
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = '''v4.0.0'''
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size , 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = '''v3.0.0'''
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size , 768)
| 7 | 0 |