code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---|
stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
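# The block above is the optional-dependency guard pattern: try the real
# imports and, when a dependency is missing, expose a dummy object that only
# errors at use time. Below is a minimal, self-contained sketch of the same
# idea; `torch_is_available`, `DummyPipeline`, and `Pipeline` are illustrative
# names introduced here, not part of any real package.
import importlib.util


def torch_is_available() -> bool:
    # find_spec returns None when the package cannot be located
    return importlib.util.find_spec("torch") is not None


class DummyPipeline:
    def __init__(self, *args, **kwargs):
        raise ImportError("DummyPipeline requires `torch`: pip install torch")


if torch_is_available():
    import torch

    class Pipeline:
        def __call__(self, x):
            return torch.as_tensor(x) * 2  # stand-in for real work
else:
    Pipeline = DummyPipeline  # importable either way; fails only on use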
| 198 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class UpperCAmelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase ) -> List[str]:
super().__init__()
lowercase__ : List[str] = model
lowercase__ : Dict = 2
lowercase__ : Any = nn.Linear(self.model.config.hidden_size , self.num_labels )
def _lowerCAmelCase( self ) -> str:
pass
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
# load longformer model from model identifier
lowercase__ : Dict = LongformerModel.from_pretrained(UpperCAmelCase )
lowercase__ : List[str] = LightningModel(UpperCAmelCase )
lowercase__ : List[Any] = torch.load(UpperCAmelCase , map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
lowercase__ : Optional[int] = LongformerForQuestionAnswering.from_pretrained(UpperCAmelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(UpperCAmelCase )
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__a: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__a: Tuple = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
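# A hedged invocation sketch for the conversion script above; the checkpoint
# path and output directory are placeholders, and the file name assumes the
# script is saved as convert_longformer_qa.py:
#
#   python convert_longformer_qa.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/epoch=4.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa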
| 198 | 1 |
from __future__ import annotations
from math import pi, sqrt
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative')
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative')
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
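# A worked example for the function above, with illustrative component values:
# L = 10 mH and C = 5 uF give f = 1 / (2*pi*sqrt(L*C)) = 1 / (2*pi*sqrt(5e-8))
# which is approximately 711.76 Hz.
from math import isclose, pi, sqrt

label, freq = "Resonant frequency", 1 / (2 * pi * sqrt(10e-3 * 5e-6))
assert isclose(freq, 711.7625, rel_tol=1e-5)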
| 327 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape
SCREAMING_SNAKE_CASE = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = torch.load(_UpperCAmelCase , map_location='cpu')
SCREAMING_SNAKE_CASE = mam_aaa['args'] or mam_aaa['cfg']['model']
SCREAMING_SNAKE_CASE = mam_aaa['model']
remove_ignore_keys_(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = state_dict['encoder.embed_tokens.weight'].shape[0]
SCREAMING_SNAKE_CASE = MaMaaaConfig(
vocab_size=_UpperCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
SCREAMING_SNAKE_CASE = state_dict['decoder.embed_tokens.weight']
SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(_UpperCAmelCase)
model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared)
return model
if __name__ == "__main__":
a_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
a_ : List[str] = parser.parse_args()
a_ : Dict = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
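# A hedged invocation sketch for the fairseq -> transformers conversion above;
# both arguments are positional (checkpoint first, output folder second), the
# paths are placeholders, and the script file name is assumed:
#
#   python convert_m2m100_checkpoint.py /path/to/m2m100/model.pt ./m2m100-hf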
| 327 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
__lowercase = logging.get_logger(__name__)
__lowercase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowercase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
__lowercase = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
__lowercase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : List[str] = BertTokenizer
def __init__( self : Dict , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Any=None , __UpperCAmelCase : Any=True , __UpperCAmelCase : Tuple="[UNK]" , __UpperCAmelCase : Union[str, Any]="[SEP]" , __UpperCAmelCase : List[Any]="[PAD]" , __UpperCAmelCase : int="[CLS]" , __UpperCAmelCase : List[str]="[MASK]" , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : int=None , **__UpperCAmelCase : Optional[Any] , ):
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , )
a : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , __UpperCAmelCase) != do_lower_case
or normalizer_state.get("strip_accents" , __UpperCAmelCase) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __UpperCAmelCase) != tokenize_chinese_chars
):
a : Tuple = getattr(__UpperCAmelCase , normalizer_state.pop("type"))
a : Optional[int] = do_lower_case
a : Optional[int] = strip_accents
a : Union[str, Any] = tokenize_chinese_chars
a : Union[str, Any] = normalizer_class(**__UpperCAmelCase)
a : Optional[Any] = do_lower_case
def __snake_case ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Any=None):
a : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __snake_case ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None):
a : Tuple = [self.sep_token_id]
a : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def __snake_case ( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None):
a : Dict = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase)
return tuple(__UpperCAmelCase)
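# A short usage sketch for a fast BERT tokenizer like the class above; it
# requires the `transformers` package and network access to fetch the
# checkpoint. The two helper methods defined above correspond to adding
# [CLS]/[SEP] around the input ids and to building token_type_ids.
from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
enc = tok("Hello world!")
print(enc["input_ids"])       # special tokens are added automatically
print(enc["token_type_ids"])  # all zeros for a single sequence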
| 40 |
"""simple docstring"""
__lowercase = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
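# A sketch of how a pin table like the one above can be consumed: split each
# entry into (name, specifier) and compare against the installed version.
# This assumes the `packaging` library is available; entries that are not pip
# packages (e.g. the "python" row) would need special-casing, and
# `pin_is_satisfied` is a name introduced here for illustration.
import re
from importlib.metadata import PackageNotFoundError, version
from packaging.specifiers import SpecifierSet


def pin_is_satisfied(pin: str) -> bool:
    name, spec = re.match(r"([A-Za-z0-9_.\[\]-]+)(.*)", pin).groups()
    if not spec:
        spec = ">=0"  # bare name: any installed version is acceptable
    try:
        # strip extras like "[tune]" before the metadata lookup
        return version(name.split("[")[0]) in SpecifierSet(spec)
    except PackageNotFoundError:
        return False


print(pin_is_satisfied("numpy>=1.17"))  # True when an adequate NumPy is installed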
| 40 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class A ( pl.LightningModule ):
def __init__(self , lowerCAmelCase ):
super().__init__()
__lowercase= model
__lowercase= 2
__lowercase= nn.Linear(self.model.config.hidden_size , self.num_labels )
def _A (self ):
pass
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase= LongformerModel.from_pretrained(lowerCAmelCase_ )
__lowercase= LightningModel(lowerCAmelCase_ )
__lowercase= torch.load(lowerCAmelCase_ , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
__lowercase= LongformerForQuestionAnswering.from_pretrained(lowerCAmelCase_ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(lowerCAmelCase_ )
print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path to the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 369 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int:
'''simple docstring'''
__lowercase= {}
if train_file is not None:
__lowercase= [train_file]
if eval_file is not None:
__lowercase= [eval_file]
if test_file is not None:
__lowercase= [test_file]
__lowercase= datasets.load_dataset('csv' , data_files=lowercase__ )
__lowercase= list(ds[list(files.keys() )[0]].features.keys() )
__lowercase= features_name.pop(lowercase__ )
__lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) )
__lowercase= {label: i for i, label in enumerate(lowercase__ )}
__lowercase= tokenizer.model_input_names
__lowercase= {}
if len(lowercase__ ) == 1:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , )
elif len(lowercase__ ) == 2:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class A :
UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} )
UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} )
UpperCamelCase_ : int =field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _lowerCamelCase( ) -> Optional[Any]:
'''simple docstring'''
__lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overwrite it.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase= AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase, __lowercase, __lowercase, __lowercase= get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__lowercase= AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__lowercase= TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowercase__ ) -> Dict:
__lowercase= np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__lowercase= TFTrainer(
model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase= {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowercase= trainer.evaluate()
__lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(lowercase__ )
return results
if __name__ == "__main__":
main()
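# A hedged invocation sketch for the TF training script above; file names and
# the model id are placeholders, and the script name is assumed.
# `--label_column_id` selects the CSV column that holds the label, per the
# data arguments defined above:
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 --max_seq_length 128 \
#       --output_dir ./out --do_train --do_eval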
| 304 | 0 |
'''simple docstring'''
from __future__ import annotations
lowercase__ : Any = list[list[int]]
# assigning initial values to the grid
lowercase__ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
lowercase__ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def a__ ( lowercase : Matrix, lowercase : int, lowercase : int, lowercase : int ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def a__ ( lowercase : Matrix ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def a__ ( lowercase : Matrix ) -> Matrix | None:
"""simple docstring"""
if location := find_empty_location(lowercase ):
_UpperCamelCase , _UpperCamelCase = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1, 10 ):
if is_safe(lowercase, lowercase, lowercase, lowercase ):
_UpperCamelCase = digit
if sudoku(lowercase ) is not None:
return grid
_UpperCamelCase = 0
return None
def a__ ( lowercase : Matrix ) -> None:
"""simple docstring"""
for row in grid:
for cell in row:
print(lowercase, end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
lowercase__ : Dict = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
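# A small usage check for the solver above, using the grid names referenced in
# the __main__ demo block. The grids are mutated in place, so pass copies when
# the originals should survive:
result = sudoku([row[:] for row in initial_grid])
print("solved" if result is not None else "no solution")  # expected: solved
result = sudoku([row[:] for row in no_solution])
print("solved" if result is not None else "no solution")  # expected: no solution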
| 324 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
lowercase__ : Any = logging.get_logger(__name__)
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : List[str] = None
@experimental
def a__ ( lowercase : Union[str, Any], lowercase : Optional[int], lowercase : Tuple, lowercase : List[Any], lowercase : Dict, lowercase : Union[str, Any], lowercase : Optional[Any] ) -> int:
"""simple docstring"""
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase )
return _map_with_joblib(lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase )
def a__ ( lowercase : Dict, lowercase : str, lowercase : Union[str, Any], lowercase : Optional[Any], lowercase : Optional[int], lowercase : Optional[Any], lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = num_proc if num_proc <= len(lowercase ) else len(lowercase )
_UpperCamelCase = [] # We organize the splits ourselves (contiguous splits)
for index in range(lowercase ):
_UpperCamelCase = len(lowercase ) // num_proc
_UpperCamelCase = len(lowercase ) % num_proc
_UpperCamelCase = div * index + min(lowercase, lowercase )
_UpperCamelCase = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(lowercase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F"""Error dividing inputs iterable among processes. """
F"""Total number of objects {len(lowercase )}, """
F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
F"""Spawning {num_proc} processes for {len(lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
_UpperCamelCase , _UpperCamelCase = None, None
if not disable_tqdm:
_UpperCamelCase , _UpperCamelCase = (RLock(),), tqdm.set_lock
with Pool(lowercase, initargs=lowercase, initializer=lowercase ) as pool:
_UpperCamelCase = pool.map(lowercase, lowercase )
logger.info(F"""Finished {num_proc} processes""" )
_UpperCamelCase = [obj for proc_res in mapped for obj in proc_res]
logger.info(F"""Unpacked {len(lowercase )} objects""" )
return mapped
def a__ ( lowercase : str, lowercase : Tuple, lowercase : List[str], lowercase : List[str], lowercase : Any, lowercase : int, lowercase : Optional[Any] ) -> Any:
"""simple docstring"""
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=lowercase ):
return joblib.Parallel()(
joblib.delayed(lowercase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def a__ ( lowercase : str ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
_UpperCamelCase = None
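# A hedged sketch of the intended entry point for the context manager above,
# whose public name in the `datasets` library is parallel_backend. Inside the
# `with` block, map-style work is routed through joblib.Parallel instead of
# multiprocessing.Pool; the "spark" backend additionally requires joblibspark:
#
#   from datasets.parallel import parallel_backend
#
#   with parallel_backend("spark"):
#       ...  # map-style work dispatched via joblib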
| 324 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = 42
snake_case = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 65 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return EnvironmentCommand()
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return EnvironmentCommand(args.accelerate_config_file )
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE )->List[Any]:
'''simple docstring'''
A_ : str = parser.add_parser('''env''' )
download_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
download_parser.add_argument(
'''--accelerate-config_file''' , default=_SCREAMING_SNAKE_CASE , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
def __init__( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
A_ : Optional[Any] = accelerate_config_file
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : Tuple = '''not installed'''
if is_safetensors_available():
import safetensors
A_ : Any = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
A_ : Optional[Any] = F'''{safetensors.__version__} but is ignored because the PyTorch version is too old.'''
A_ : Union[str, Any] = '''not installed'''
A_ : List[Any] = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ : int = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_SCREAMING_SNAKE_CASE ):
A_ : str = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ : List[Any] = (
'''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else F'''\t{accelerate_config}'''
)
A_ : Optional[int] = '''not installed'''
A_ : str = '''NA'''
if is_torch_available():
import torch
A_ : Tuple = torch.__version__
A_ : List[Any] = torch.cuda.is_available()
A_ : int = '''not installed'''
A_ : Any = '''NA'''
if is_tf_available():
import tensorflow as tf
A_ : str = tf.__version__
try:
# deprecated in v2.1
A_ : List[str] = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ : Any = bool(tf.config.list_physical_devices('''GPU''' ) )
A_ : Union[str, Any] = '''not installed'''
A_ : Tuple = '''not installed'''
A_ : Tuple = '''not installed'''
A_ : Union[str, Any] = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
A_ : Tuple = flax.__version__
A_ : List[Any] = jax.__version__
A_ : List[Any] = jaxlib.__version__
A_ : Dict = jax.lib.xla_bridge.get_backend().platform
A_ : Union[str, Any] = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F'''{safetensors_version}''',
'''Accelerate version''': F'''{accelerate_version}''',
'''Accelerate config''': F'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''',
'''Jax version''': F'''{jax_version}''',
'''JaxLib version''': F'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_SCREAMING_SNAKE_CASE ) )
return info
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE )->Dict:
'''simple docstring'''
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 65 | 1 |
"""simple docstring"""
class __A :
def __init__( self ):
_lowerCAmelCase : List[Any] = """"""
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : List[str] = []
def __A ( self , a__ , a__ ):
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
_lowerCAmelCase : str = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
_lowerCAmelCase : Dict = self.__min_dist_top_down_dp(a__ , n - 1 )
_lowerCAmelCase : int = self.__min_dist_top_down_dp(m - 1 , a__ )
_lowerCAmelCase : str = self.__min_dist_top_down_dp(m - 1 , n - 1 )
_lowerCAmelCase : Optional[int] = 1 + min(a__ , a__ , a__ )
return self.dp[m][n]
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = worda
_lowerCAmelCase : Any = worda
_lowerCAmelCase : List[Any] = [[-1 for _ in range(len(a__ ) )] for _ in range(len(a__ ) )]
return self.__min_dist_top_down_dp(len(a__ ) - 1 , len(a__ ) - 1 )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Dict = worda
_lowerCAmelCase : Union[str, Any] = worda
_lowerCAmelCase : Optional[Any] = len(a__ )
_lowerCAmelCase : Union[str, Any] = len(a__ )
_lowerCAmelCase : Tuple = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
_lowerCAmelCase : Any = j
elif j == 0: # second string is empty
_lowerCAmelCase : Tuple = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
_lowerCAmelCase : int = self.dp[i - 1][j - 1]
else:
_lowerCAmelCase : List[str] = self.dp[i][j - 1]
_lowerCAmelCase : Tuple = self.dp[i - 1][j]
_lowerCAmelCase : Dict = self.dp[i - 1][j - 1]
_lowerCAmelCase : Optional[int] = 1 + min(a__ , a__ , a__ )
return self.dp[m][n]
if __name__ == "__main__":
_a : List[str] = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
_a : List[Any] = input('Enter the first string: ').strip()
_a : List[Any] = input('Enter the second string: ').strip()
print()
print(F"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
print(F"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
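# An independent reference implementation of the same recurrence, handy for
# cross-checking the class above; `edit_distance` is a name introduced here.
# The classic pair "kitten" -> "sitting" needs 3 edits (k->s, e->i, insert g).
def edit_distance(a: str, b: str) -> int:
    prev = list(range(len(b) + 1))  # distances from "" to each prefix of b
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                 # delete ca
                           cur[j - 1] + 1,              # insert cb
                           prev[j - 1] + (ca != cb)))   # substitute
        prev = cur
    return prev[-1]


assert edit_distance("kitten", "sitting") == 3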
| 44 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowercase__ : Optional[int] = datasets.utils.logging.get_logger(__name__)
lowercase__ : Optional[Any] = ["names", "prefix"]
lowercase__ : List[Any] = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
lowercase__ : Optional[Any] = ["encoding_errors", "on_bad_lines"]
lowercase__ : List[str] = ["date_format"]
@dataclass
class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ):
"""simple docstring"""
_snake_case = ","
_snake_case = None
_snake_case = "infer"
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = True
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = False
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = True
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = None
_snake_case = "."
_snake_case = None
_snake_case = '"'
_snake_case = 0
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = True
_snake_case = True
_snake_case = 0
_snake_case = True
_snake_case = False
_snake_case = None
_snake_case = 10000
_snake_case = None
_snake_case = "strict"
_snake_case = "error"
_snake_case = None
def A__ ( self )-> Any:
'''simple docstring'''
if self.delimiter is not None:
__UpperCamelCase = self.delimiter
if self.column_names is not None:
__UpperCamelCase = self.column_names
@property
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , SCREAMING_SNAKE_CASE_ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
_snake_case = CsvConfig
def A__ ( self )-> Any:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Optional[int]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
__UpperCamelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(SCREAMING_SNAKE_CASE_ , (str, list, tuple) ):
__UpperCamelCase = data_files
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = [files]
__UpperCamelCase = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__UpperCamelCase = []
for split_name, files in data_files.items():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = [files]
__UpperCamelCase = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files]
splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE_ , gen_kwargs={'''files''': files} ) )
return splits
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> pa.Table:
'''simple docstring'''
if self.config.features is not None:
__UpperCamelCase = self.config.features.arrow_schema
if all(not require_storage_cast(SCREAMING_SNAKE_CASE_ ) for feature in self.config.features.values() ):
# cheaper cast
__UpperCamelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE_ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
__UpperCamelCase = table_cast(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return pa_table
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> str:
'''simple docstring'''
__UpperCamelCase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
__UpperCamelCase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE_ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE_ ) ):
__UpperCamelCase = pd.read_csv(SCREAMING_SNAKE_CASE_ , iterator=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = pa.Table.from_pandas(SCREAMING_SNAKE_CASE_ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE_ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(SCREAMING_SNAKE_CASE_ )}: {e}" )
raise
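# A minimal sketch of the public entry point that dispatches to the builder
# above; 'data.csv' is a placeholder CSV with a header row, and extra keyword
# arguments such as `sep` flow through CsvConfig into pandas.read_csv:
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files="data.csv", sep=",")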
| 328 | 0 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
UpperCAmelCase : Tuple = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
UpperCAmelCase : Dict = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
UpperCAmelCase : List[str] = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
UpperCAmelCase : Optional[Any] = f"""down_blocks.{i}.resnets.{j}."""
UpperCAmelCase : str = f"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
UpperCAmelCase : str = f"""down_blocks.{i}.attentions.{j}."""
UpperCAmelCase : Any = f"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
UpperCAmelCase : str = f"""up_blocks.{i}.resnets.{j}."""
UpperCAmelCase : Any = f"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
UpperCAmelCase : Optional[Any] = f"""up_blocks.{i}.attentions.{j}."""
UpperCAmelCase : Any = f"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
UpperCAmelCase : Optional[Any] = f"""down_blocks.{i}.downsamplers.0.conv."""
UpperCAmelCase : Dict = f"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
UpperCAmelCase : Optional[Any] = f"""up_blocks.{i}.upsamplers.0."""
UpperCAmelCase : Dict = f"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
UpperCAmelCase : str = "mid_block.attentions.0."
UpperCAmelCase : Dict = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
UpperCAmelCase : Optional[int] = f"""mid_block.resnets.{j}."""
UpperCAmelCase : str = f"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def __lowerCamelCase ( lowerCamelCase__ : Dict ):
'''simple docstring'''
lowerCamelCase = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
lowerCamelCase = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
lowerCamelCase = v.replace(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
lowerCamelCase = v.replace(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase = v
lowerCamelCase = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
UpperCAmelCase : Optional[Any] = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
UpperCAmelCase : List[str] = f"""encoder.down_blocks.{i}.resnets.{j}."""
UpperCAmelCase : int = f"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
UpperCAmelCase : List[str] = f"""down_blocks.{i}.downsamplers.0."""
UpperCAmelCase : Union[str, Any] = f"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
UpperCAmelCase : Dict = f"""up_blocks.{i}.upsamplers.0."""
UpperCAmelCase : Dict = f"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
UpperCAmelCase : Dict = f"""decoder.up_blocks.{i}.resnets.{j}."""
UpperCAmelCase : Tuple = f"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
UpperCAmelCase : Tuple = f"""mid_block.resnets.{i}."""
UpperCAmelCase : int = f"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
UpperCAmelCase : Union[str, Any] = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def __lowerCamelCase ( lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
def __lowerCamelCase ( lowerCamelCase__ : List[Any] ):
'''simple docstring'''
lowerCamelCase = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
lowerCamelCase = v.replace(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
lowerCamelCase = v.replace(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase = v
lowerCamelCase = {v: vae_state_dict[k] for k, v in mapping.items()}
lowerCamelCase = ["""q""", """k""", """v""", """proj_out"""]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f'mid.attn_1.{weight_name}.weight' in k:
print(f'Reshaping {k} for SD format' )
lowerCamelCase = reshape_weight_for_sd(lowerCamelCase__ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
UpperCAmelCase : List[str] = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
UpperCAmelCase : Any = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
UpperCAmelCase : Optional[int] = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
UpperCAmelCase : Optional[Any] = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # single character: "q", "k" or "v"
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]  # single character: "q", "k" or "v"
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    # the v1 CLIP text encoder needs no key renaming
    return text_enc_dict
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
)
UpperCAmelCase : Dict = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
UpperCAmelCase : List[str] = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
UpperCAmelCase : str = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
UpperCAmelCase : Dict = osp.join(args.model_path, "text_encoder", "model.safetensors")
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
UpperCAmelCase : int = load_file(unet_path, device="cpu")
else:
UpperCAmelCase : Union[str, Any] = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
UpperCAmelCase : Tuple = torch.load(unet_path, map_location="cpu")
if osp.exists(vae_path):
UpperCAmelCase : Tuple = load_file(vae_path, device="cpu")
else:
UpperCAmelCase : str = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
UpperCAmelCase : Any = torch.load(vae_path, map_location="cpu")
if osp.exists(text_enc_path):
UpperCAmelCase : Optional[int] = load_file(text_enc_path, device="cpu")
else:
UpperCAmelCase : int = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
UpperCAmelCase : Optional[Any] = torch.load(text_enc_path, map_location="cpu")
# Convert the UNet model
UpperCAmelCase : List[Any] = convert_unet_state_dict(unet_state_dict)
UpperCAmelCase : List[str] = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
UpperCAmelCase : List[Any] = convert_vae_state_dict(vae_state_dict)
UpperCAmelCase : Union[str, Any] = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
UpperCAmelCase : str = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
UpperCAmelCase : str = {"transformer." + k: v for k, v in text_enc_dict.items()}
UpperCAmelCase : Union[str, Any] = convert_text_enc_state_dict_vaa(text_enc_dict)
UpperCAmelCase : Optional[Any] = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
else:
UpperCAmelCase : Dict = convert_text_enc_state_dict(text_enc_dict)
UpperCAmelCase : Dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
UpperCAmelCase : List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
UpperCAmelCase : str = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
UpperCAmelCase : Any = {"state_dict": state_dict}
torch.save(state_dict, args.checkpoint_path)
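# Example invocation (script name and paths are illustrative only):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.safetensors --use_safetensors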
| 66 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
    # ===== assertions =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 66 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a VisualBERT research checkpoint into the transformers VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
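# Example invocation (the checkpoint name comes from ACCEPTABLE_CHECKPOINTS; the
# output directory is illustrative):
#   python convert_visual_bert_checkpoint.py nlvr2_fine_tuned.th ./visualbert-nlvr2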
| 77 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 122 | 0 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn' | 58 |
"""simple docstring"""
from ....utils import logging
__snake_case : Optional[Any] = logging.get_logger(__name__)
class MMBTConfig:
    """
    Configuration class to store the configuration of an MMBT model, built on top of an
    existing text config.
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels | 58 | 1 |
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """
    Accessor for a DeepSpeed config given as a dict, a JSON file path, or a base64-encoded
    JSON string.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    """Internal wrapper for a deepspeed engine; `accelerator.backward(loss)` drives the engine."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """Internal wrapper around a deepspeed optimizer."""

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """Internal wrapper around a deepspeed scheduler."""

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Dummy optimizer placeholder, used when the optimizer is defined in the deepspeed config file."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Dummy scheduler placeholder, used when the scheduler is defined in the deepspeed config file."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
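# Minimal usage sketch of the config accessor (the config values are illustrative):
#   hf_ds_config = HfDeepSpeedConfig({"zero_optimization": {"stage": 3}})
#   hf_ds_config.get_value("zero_optimization.stage")  # -> 3
#   hf_ds_config.is_zero3()                            # -> True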
| 49 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # compare two TensorProtos while ignoring their names
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model and saves the optimized copy
    next to the original file.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 1_1:
                    mem_size *= 8  # int64 / double
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print('total reduced size: ', total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4, 'GB')

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)

    return new_model
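# Illustrative usage (the file path is hypothetical):
#   optimized_path = remove_dup_initializers("exported/model.onnx")
#   # writes exported/optimized_model.onnx and returns its path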
| 253 | 0 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3E-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3E-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 148 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row, column) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row, position=None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
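# Quick illustrative usage:
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()  # -2
#   (m * m).rows     # [[7, 10], [15, 22]]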
if __name__ == "__main__":
import doctest
doctest.testmod()
| 148 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps | 269 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 33 | 0 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
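# Behavior sketch inferred from the assertions above (not verified against accelerate):
#   _convert_nargs_to_dict(["--epochs", "3", "--do_train", "False"])
#   # -> {"epochs": 3, "do_train": False}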
| 350 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    """
    Creates a pair of dataloaders (train, eval) for the GLUE MRPC dataset.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
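# Typical launch under a DeepSpeed config (the config file name and script name are illustrative):
#   accelerate launch --config_file deepspeed_config.yaml this_script.py --model_name_or_path bert-base-cased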
| 233 | 0 |
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
os.environ['ORT_TENSORRT_INT8_ENABLE'] = '1'  # Enable INT8 precision
os.environ['ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE'] = '0'  # Use TensorRT calibration table
os.environ['ORT_TENSORRT_ENGINE_CACHE_ENABLE'] = '1'  # Enable engine caching

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
start_time = time.time()
max_iters = 2_000
predict = {}
for iter in range(max_iters):
    predict[iter] = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1_000 / max_iters))
| 2 |
def actual_power(a: int, b: int):
    """
    Divide-and-conquer computation of a ** b. Because int(b / 2) truncates toward
    zero, the recursion also yields a ** abs(b) for negative b, which `power` relies on.
    """
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Compute a ** b for any integer exponent, e.g. power(-2, -3) == -0.125."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
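    # prints -0.125, i.e. 1 / (-2) ** 3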
| 149 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : Optional[int]=7 , UpperCamelCase : str=3 , UpperCamelCase : List[str]=30 , UpperCamelCase : Any=4_00 , UpperCamelCase : List[Any]=True , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=[0.5, 0.5, 0.5] , UpperCamelCase : List[Any]=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=True , UpperCamelCase : str=1 / 2_55 , UpperCamelCase : Dict=True , ) -> Union[str, Any]:
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase__ : List[str] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : Tuple = num_channels
lowerCAmelCase__ : str = min_resolution
lowerCAmelCase__ : Dict = max_resolution
lowerCAmelCase__ : Tuple = do_resize
lowerCAmelCase__ : Optional[Any] = size
lowerCAmelCase__ : int = do_normalize
lowerCAmelCase__ : List[Any] = image_mean
lowerCAmelCase__ : Union[str, Any] = image_std
lowerCAmelCase__ : Optional[int] = do_rescale
lowerCAmelCase__ : Union[str, Any] = rescale_factor
lowerCAmelCase__ : List[str] = do_pad
def _lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : List[Any]=False ) -> Optional[Any]:
"""simple docstring"""
if not batched:
lowerCAmelCase__ : Dict = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
lowerCAmelCase__ , lowerCAmelCase__ : Dict = image.size
else:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase__ : Any = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase__ : Dict = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase__ : Union[str, Any] = self.size["""shortest_edge"""]
lowerCAmelCase__ : Union[str, Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase__ : Any = self.size["""shortest_edge"""]
lowerCAmelCase__ : List[Any] = self.size["""shortest_edge"""]
else:
lowerCAmelCase__ : List[str] = []
for image in image_inputs:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase__ : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
lowerCAmelCase__ : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
_lowerCamelCase :Tuple = ConditionalDetrImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = ConditionalDetrImageProcessingTester(self )
@property
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
lowerCAmelCase__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
# Initialize image_processing
lowerCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
lowerCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
lowerCAmelCase__ : str = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
lowerCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
lowerCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase__ : Dict = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
# Initialize image_processing
lowerCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
lowerCAmelCase__ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase__ : Optional[int] = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
# prepare image and target
lowerCAmelCase__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase__ : List[str] = json.loads(f.read() )
lowerCAmelCase__ : Tuple = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
lowerCAmelCase__ : int = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
lowerCAmelCase__ : List[Any] = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase__ : Dict = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
lowerCAmelCase__ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1E-4 ) )
# verify area
lowerCAmelCase__ : Any = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
lowerCAmelCase__ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
lowerCAmelCase__ : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ : Tuple = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
lowerCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
lowerCAmelCase__ : List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
lowerCAmelCase__ : List[Any] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
lowerCAmelCase__ : Any = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def _lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
# prepare image, target and masks_path
lowerCAmelCase__ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase__ : Optional[int] = json.loads(f.read() )
lowerCAmelCase__ : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
lowerCAmelCase__ : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase__ : Optional[int] = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
lowerCAmelCase__ : Optional[int] = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase__ : Optional[int] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
lowerCAmelCase__ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1E-4 ) )
# verify area
lowerCAmelCase__ : Union[str, Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
lowerCAmelCase__ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
lowerCAmelCase__ : List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ : List[str] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
lowerCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
lowerCAmelCase__ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
lowerCAmelCase__ : Tuple = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
lowerCAmelCase__ : List[str] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
lowerCAmelCase__ : Dict = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
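
# Worked instance of the shortest-edge resize rule in get_expected_values
# above: for an input with (h, w) = (400, 300) and shortest_edge = 18,
# w < h, so the width snaps to 18 and the height scales proportionally.
assert (int(18 * 400 / 300), 18) == (24, 18)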
| 212 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
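
# Hedged usage sketch of the template above: align it with a concrete
# dataset schema ("genre" and the label names are illustrative).
_example_features = Features({"audio": Audio(), "genre": ClassLabel(names=["blues", "jazz"])})
_task = AudioClassification(audio_column="audio", label_column="genre")
_aligned = _task.align_with_features(_example_features)
assert _aligned.label_schema["labels"].names == ["blues", "jazz"]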
| 212 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256_008,
        max_position_embeddings=2_048,
        d_model=1_024,
        ffn_dim=4_096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
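
# Standalone check of the attribute_map aliasing declared above: the
# generic names resolve to the XGLM-specific attributes.
_cfg = XGLMConfig()
assert _cfg.num_attention_heads == _cfg.attention_heads == 16
assert _cfg.hidden_size == _cfg.d_model == 1_024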
| 64 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_A : int = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class a__ ( unittest.TestCase, a_ ):
def __magic_name__ ( self ):
lowercase : Tuple = load_tool("text-question-answering" )
self.tool.setup()
lowercase : Dict = load_tool("text-question-answering" , remote=_a )
def __magic_name__ ( self ):
lowercase : str = self.tool(_a , "What did Hugging Face do in April 2021?" )
self.assertEqual(_a , "launched the BigScience Research Workshop" )
def __magic_name__ ( self ):
lowercase : Union[str, Any] = self.remote_tool(_a , "What did Hugging Face do in April 2021?" )
self.assertEqual(_a , "launched the BigScience Research Workshop" )
def __magic_name__ ( self ):
lowercase : int = self.tool(text=_a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(_a , "launched the BigScience Research Workshop" )
def __magic_name__ ( self ):
lowercase : Optional[Any] = self.remote_tool(text=_a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(_a , "launched the BigScience Research Workshop" )
| 202 | 0 |
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
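
# Cross-check the row-by-row DP against math.comb (Python 3.8+).
import math

for _n in range(12):
    for _r in range(_n + 1):
        assert binomial_coefficient(_n, _r) == math.comb(_n, _r)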
| 211 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
# read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
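
    # Optional self-check that avoids the OpenCV + lena.jpg dependency:
    # filter a random uint8 image and verify the im2col output shape.
    import numpy as _np

    _test_img = (_np.random.rand(32, 32) * 255).astype(_np.uint8)
    _out = gaussian_filter(_test_img, 5, sigma=1.0)
    assert _out.shape == (32 - 5 + 1, 32 - 5 + 1)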
| 211 | 1 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
a_ :str = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 277 |
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
_snake_case = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
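
# Deterministic examples alongside the interactive prompt above.
assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]
assert harmonic_series("") == []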
| 26 | 0 |
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance from this node to the goal
        dy = abs(self.pos_x - self.goal_x)
        dx = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()

    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

    for elem in grid:
        print(elem)
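
    # Design note: search() re-sorts the whole open list on every
    # iteration (O(n log n) per pop). Because nodes order by f_cost via
    # __lt__, a binary heap yields the same best-first order in O(log n).
    # Minimal standalone sketch with (f_cost, payload) pairs:
    import heapq

    _frontier: list = []
    for _item in [(3, "b"), (1, "a"), (2, "c")]:
        heapq.heappush(_frontier, _item)
    assert heapq.heappop(_frontier) == (1, "a")  # cheapest f_cost first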
| 76 |
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def _snake_case ( UpperCamelCase : np.ndarray ):
return input_array.reshape((input_array.size, 1) )
def _snake_case ( UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray , UpperCamelCase : int ):
UpperCAmelCase : Optional[int] = np.nan
for i in range(UpperCamelCase ):
UpperCAmelCase : int = features[:, labels == i]
UpperCAmelCase : List[Any] = data.mean(1 )
# Centralize the data of class i
UpperCAmelCase : Dict = data - column_reshape(UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCAmelCase : Optional[Any] = np.dot(UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def _snake_case ( UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray , UpperCamelCase : int ):
UpperCAmelCase : Tuple = features.mean(1 )
UpperCAmelCase : Union[str, Any] = np.nan
for i in range(UpperCamelCase ):
UpperCAmelCase : int = features[:, labels == i]
UpperCAmelCase : List[str] = data.shape[1]
UpperCAmelCase : Optional[int] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(UpperCamelCase ) - column_reshape(UpperCamelCase ) , (column_reshape(UpperCamelCase ) - column_reshape(UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(UpperCamelCase ) - column_reshape(UpperCamelCase ) , (column_reshape(UpperCamelCase ) - column_reshape(UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def _snake_case ( UpperCamelCase : np.ndarray , UpperCamelCase : int ):
# Check if the features have been loaded
if features.any():
UpperCAmelCase : Tuple = features.mean(1 )
# Center the dataset
UpperCAmelCase : List[str] = features - np.reshape(UpperCamelCase , (data_mean.size, 1) )
UpperCAmelCase : str = np.dot(UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCAmelCase , UpperCAmelCase : int = np.linalg.eigh(UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCAmelCase : List[Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCAmelCase : int = np.dot(filtered_eigenvectors.T , UpperCamelCase )
logging.info("""Principal Component Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=UpperCamelCase )
logging.error("""Dataset empty""" )
raise AssertionError
def _snake_case ( UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray , UpperCamelCase : int , UpperCamelCase : int ):
assert classes > dimensions
# Check if features have been already loaded
if features.any:
UpperCAmelCase , UpperCAmelCase : Dict = eigh(
covariance_between_classes(UpperCamelCase , UpperCamelCase , UpperCamelCase ) , covariance_within_classes(UpperCamelCase , UpperCamelCase , UpperCamelCase ) , )
UpperCAmelCase : Any = eigenvectors[:, ::-1][:, :dimensions]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = np.linalg.svd(UpperCamelCase )
UpperCAmelCase : Tuple = svd_matrix[:, 0:dimensions]
UpperCAmelCase : Tuple = np.dot(filtered_svd_matrix.T , UpperCamelCase )
logging.info("""Linear Discriminant Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=UpperCamelCase )
logging.error("""Dataset empty""" )
raise AssertionError
def _snake_case ( ):
# Create dummy dataset with 2 classes and 3 features
UpperCAmelCase : Dict = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
UpperCAmelCase : List[str] = 2
UpperCAmelCase : int = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCamelCase ) as error_info:
UpperCAmelCase : Union[str, Any] = linear_discriminant_analysis(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
if isinstance(UpperCamelCase , np.ndarray ):
raise AssertionError(
"""Did not raise AssertionError for dimensions > classes""" )
assert error_info.type is AssertionError
def _snake_case ( ):
UpperCAmelCase : List[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCAmelCase : Optional[int] = 2
UpperCAmelCase : Any = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCamelCase ) as error_info:
UpperCAmelCase : Tuple = principal_component_analysis(UpperCamelCase , UpperCamelCase )
if not np.allclose(UpperCamelCase , UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
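
    # Self-contained restatement of the PCA path above (independent of the
    # obfuscated definitions): center, covariance, eigendecompose, project.
    _feats = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]], dtype=float)
    _centered = _feats - _feats.mean(1, keepdims=True)
    _cov = _centered @ _centered.T / _feats.shape[1]
    _vals, _vecs = np.linalg.eigh(_cov)
    _proj = _vecs[:, ::-1][:, :2].T @ _feats  # keep the top 2 components
    assert _proj.shape == (2, 5)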
| 76 | 1 |
"""simple docstring"""
def actual_power(a: int, b: int) -> int:
    '''simple docstring'''
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    '''simple docstring'''
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
| 136 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def _SCREAMING_SNAKE_CASE () -> List[Any]:
'''simple docstring'''
lowercase_ = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__lowerCAmelCase , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__lowerCAmelCase , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowercase_ = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
def remove_articles(__lowerCAmelCase ):
return ARTICLES_REGEX.sub(""" """ , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase ):
lowercase_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
if not s:
return []
return normalize_answer(__lowerCAmelCase ).split()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = get_tokens(__lowerCAmelCase )
lowercase_ = get_tokens(__lowerCAmelCase )
lowercase_ = collections.Counter(__lowerCAmelCase ) & collections.Counter(__lowerCAmelCase )
lowercase_ = sum(common.values() )
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
lowercase_ = 1.0 * num_same / len(__lowerCAmelCase )
lowercase_ = 1.0 * num_same / len(__lowerCAmelCase )
lowercase_ = (2 * precision * recall) / (precision + recall)
return fa
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = {}
lowercase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowercase_ = qa["""id"""]
lowercase_ = [t for t in qa["""answers"""]["""text"""] if normalize_answer(__lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowercase_ = [""""""]
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
lowercase_ = preds[qid]
# Take max over all gold answers
lowercase_ = max(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
lowercase_ = max(compute_fa(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = {}
for qid, s in scores.items():
lowercase_ = na_probs[qid] > na_prob_thresh
if pred_na:
lowercase_ = float(not qid_to_has_ans[qid] )
else:
lowercase_ = s
return new_scores
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
if not qid_list:
lowercase_ = len(__lowerCAmelCase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
lowercase_ = len(__lowerCAmelCase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
for k in new_eval:
lowercase_ = new_eval[k]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
plt.step(__lowerCAmelCase , __lowerCAmelCase , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(__lowerCAmelCase , __lowerCAmelCase , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCAmelCase )
plt.savefig(__lowerCAmelCase )
plt.clf()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> List[Any]:
'''simple docstring'''
lowercase_ = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
lowercase_ = 0.0
lowercase_ = 1.0
lowercase_ = 0.0
lowercase_ = [1.0]
lowercase_ = [0.0]
lowercase_ = 0.0
for i, qid in enumerate(__lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowercase_ = true_pos / float(i + 1 )
lowercase_ = true_pos / float(__lowerCAmelCase )
if i == len(__lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCAmelCase )
recalls.append(__lowerCAmelCase )
if out_image:
plot_pr_curve(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return {"ap": 100.0 * avg_prec}
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
'''simple docstring'''
if out_image_dir and not os.path.exists(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
lowercase_ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
lowercase_ = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
lowercase_ = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
lowercase_ = {k: float(__lowerCAmelCase ) for k, v in qid_to_has_ans.items()}
lowercase_ = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , """pr_exact""" )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , """pr_f1""" )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , """pr_oracle""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if not qid_list:
return
lowercase_ = [na_probs[k] for k in qid_list]
lowercase_ = np.ones_like(__lowerCAmelCase ) / float(len(__lowerCAmelCase ) )
plt.hist(__lowerCAmelCase , weights=__lowerCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__lowerCAmelCase , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
lowercase_ = num_no_ans
lowercase_ = cur_score
lowercase_ = 0.0
lowercase_ = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
for i, qid in enumerate(__lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowercase_ = scores[qid]
else:
if preds[qid]:
lowercase_ = -1
else:
lowercase_ = 0
cur_score += diff
if cur_score > best_score:
lowercase_ = cur_score
lowercase_ = na_probs[qid]
return 100.0 * best_score / len(__lowerCAmelCase ), best_thresh
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ , lowercase_ = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase_ , lowercase_ = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase_ = best_exact
lowercase_ = exact_thresh
lowercase_ = best_fa
lowercase_ = fa_thresh
def _SCREAMING_SNAKE_CASE () -> int:
'''simple docstring'''
with open(OPTS.data_file ) as f:
lowercase_ = json.load(__lowerCAmelCase )
lowercase_ = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
lowercase_ = json.load(__lowerCAmelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
lowercase_ = json.load(__lowerCAmelCase )
else:
lowercase_ = {k: 0.0 for k in preds}
lowercase_ = make_qid_to_has_ans(__lowerCAmelCase ) # maps qid to True/False
lowercase_ = [k for k, v in qid_to_has_ans.items() if v]
lowercase_ = [k for k, v in qid_to_has_ans.items() if not v]
lowercase_ , lowercase_ = get_raw_scores(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
lowercase_ = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
lowercase_ = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase )
if has_ans_qids:
lowercase_ = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , """HasAns""" )
if no_ans_qids:
lowercase_ = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
else:
print(json.dumps(__lowerCAmelCase , indent=2 ) )
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
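
    # Self-contained micro-example of the normalization + token-overlap F1
    # logic above (independent of the obfuscated definition names).
    def _norm(text: str) -> str:
        text = "".join(ch for ch in text.lower() if ch not in set(string.punctuation))
        text = re.sub(r"\b(a|an|the)\b", " ", text)
        return " ".join(text.split())

    _gold = _norm("a quick brown fox").split()
    _pred = _norm("quick fox!").split()
    _same = sum((collections.Counter(_gold) & collections.Counter(_pred)).values())
    _p, _r = _same / len(_pred), _same / len(_gold)
    assert abs(2 * _p * _r / (_p + _r) - 0.8) < 1e-9  # precision 1.0, recall 2/3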
| 136 | 1 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
def __init__( self : Optional[int] , snake_case_ : str , snake_case_ : Any=13 , snake_case_ : Tuple=30 , snake_case_ : List[Any]=2 , snake_case_ : str=3 , snake_case_ : List[Any]=True , snake_case_ : List[Any]=True , snake_case_ : str=32 , snake_case_ : str=5 , snake_case_ : List[Any]=4 , snake_case_ : str=37 , snake_case_ : int="gelu" , snake_case_ : Any=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Union[str, Any]=10 , snake_case_ : Tuple=0.02 , snake_case_ : List[str]=3 , snake_case_ : Any=0.6 , snake_case_ : Dict=None , ) -> Tuple:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = mask_ratio
A__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A__ = (image_size // patch_size) ** 2
A__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : int ) -> Dict:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __magic_name__ ( self : Any , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : str ) -> Tuple:
'''simple docstring'''
A__ = ViTMAEModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A__ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Any , snake_case_ : str , snake_case_ : int , snake_case_ : Tuple ) -> Optional[Any]:
'''simple docstring'''
A__ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A__ = model(_SCREAMING_SNAKE_CASE )
A__ = (self.image_size // self.patch_size) ** 2
A__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A__ = 1
A__ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(_SCREAMING_SNAKE_CASE )
A__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __magic_name__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__, A__, A__ = config_and_inputs
A__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowercase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowercase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __magic_name__ ( self : Any ) -> Any:
'''simple docstring'''
A__ = ViTMAEModelTester(self )
A__ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def __magic_name__ ( self : Any ) -> int:
'''simple docstring'''
pass
def __magic_name__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __magic_name__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_SCREAMING_SNAKE_CASE )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : int ) -> Optional[int]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Any ) -> int:
'''simple docstring'''
np.random.seed(2 )
A__ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
A__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A__ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A__ = pt_noise
super().check_pt_tf_models(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : List[str] ) -> str:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A__ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
A__ = outputs[0].cpu().numpy()
A__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
A__ = model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A__ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Make sure we don't have nans
A__ = after_outputs[0].cpu().numpy()
A__ = 0
A__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __magic_name__ ( self : Any ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __magic_name__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __magic_name__ ( self : Any ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def __magic_name__ ( self : List[str] ) -> str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __magic_name__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
@slow
def __magic_name__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ViTMAEModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
np.random.seed(2 )
A__ = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(_SCREAMING_SNAKE_CASE )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A__ = ViTMAEConfig()
A__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A__ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
A__ = model(**_SCREAMING_SNAKE_CASE , noise=torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ) )
# verify the logits
A__ = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
A__ = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_SCREAMING_SNAKE_CASE ) , atol=1e-4 ) )
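
# The sequence-length arithmetic the tester above relies on, worked for
# its defaults (image_size=30, patch_size=2, mask_ratio=0.6):
_num_patches = (30 // 2) ** 2  # 225 patches
_seq_length = int(math.ceil((1 - 0.6) * (_num_patches + 1)))  # +1 for [CLS]
assert (_num_patches, _seq_length) == (225, 91)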
| 368 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
A__ = model.config
A__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
A__ = MBartConfig(
is_decoder=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=lowercase_ , add_final_layer_norm=lowercase_ , )
return encoder_config, decoder_config
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
if "encoder.model" in name:
A__ = name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
A__ = name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
A__ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
A__ = name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
A__ = "encoder." + name
if "attn.proj" in name:
A__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
A__ = name.replace("attn" , "attention.self" )
if "norm1" in name:
A__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
A__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
A__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
A__ = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
A__ = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
A__ = "encoder.layernorm.bias"
return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 230 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
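# Illustrative trace of the PATTERNS table on a hypothetical TF key (each pair is
# applied in order, so earlier replacements can feed later ones):
#   rename_state_dict_key("encoder/layer_0/attention/output_proj/kernel")
#   -> "encoder.layers.0.attn.out_proj.weight"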
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], f"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"""summarization_{dataset}"""]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
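# Example invocation of the converter above (checkpoint path is hypothetical; the
# parent directory name, e.g. "aeslc", selects the task-specific config):
#   convert_pegasus_ckpt_to_pytorch("./ckpt/aeslc/model.ckpt-32000", "./pegasus/aeslc")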
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 335 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval", **gen_kwargs):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"""{metric_key_prefix}_"""):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"""{metric_key_prefix}_"""):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
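# A minimal sketch of how the trainer above is typically wired up; the two
# callables and dataset names below are hypothetical placeholders:
#
#   def post_process_function(examples, features, outputs, stage="eval"):
#       ...  # decode generated token ids back into answer strings
#
#   def compute_metrics(eval_preds):
#       ...  # e.g. exact-match / F1 over eval_preds.predictions and label_ids
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model, args=training_args, train_dataset=train_dataset,
#       eval_dataset=eval_dataset, eval_examples=raw_eval_examples,
#       post_process_function=post_process_function, compute_metrics=compute_metrics,
#   )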
| 335 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
snake_case = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"""
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"""/dit/{word}_512.npy"""
            )
            assert np.abs((expected_image - image).max()) < 1e-1
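# Note on the 512px test above: replacing the pipeline's default scheduler via
# `DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)` keeps the
# model's trained noise schedule while allowing far fewer sampling steps
# (25 here versus 40 in the 256px test with the default scheduler).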
| 350 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    # sort items by value/weight ratio, descending, then greedily fill the knapsack
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
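# Worked example (classic instance): values [60, 100, 120], weights [10, 20, 30],
# capacity w=50 -> the two best value/weight items fit whole and 20/30 of the
# third is added: 60 + 100 + 120 * (20 / 30) = 240.0.
#   frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)  # -> 240.0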
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Returns the squared second norm of a vector."""
    return np.dot(vector, vector)
class SVC:
    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"""Unknown kernel: {kernel}"""
            raise ValueError(msg)

    def __linear(self, vectora: ndarray, vectorb: ndarray) -> float:
        return np.dot(vectora, vectorb)

    def __rbf(self, vectora: ndarray, vectorb: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
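# A minimal usage sketch on toy, linearly separable data (labels must be +/-1):
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#         np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = np.asarray([-1, -1, 1, 1])
#   svc = SVC()
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0.0, 1.0]))  # -> -1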
if __name__ == "__main__":
import doctest
doctest.testmod()
| 231 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 231 | 1 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
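# Worked values: gamma(5) == 24.0 (i.e. 4!), and for half-integers the recursion
# bottoms out at gamma(0.5) == sqrt(pi), so gamma(1.5) == 0.5 * sqrt(pi) ~= 0.8862.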
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(F'gamma({num}) = {gamma(num)}')
print('''\nEnter 0 to exit...''')
| 16 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 16 | 1 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
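        # Usage sketch (values are hypothetical): string keys in `aggregation_labels`
        # are normalized to ints by the conversion above, e.g.
        #   TapasConfig(num_aggregation_labels=4,
        #               aggregation_labels={"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"})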
| 338 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
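    # Example of the normalization above (hypothetical input): with the default
    # remove_space / do_lower_case and keep_accents=False,
    #   "``Hello''  Café"  ->  '"hello" cafe'
    # (back/double quotes folded to '"', spaces collapsed, accent stripped, lowercased).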
    def _tokenize(self, text: str) -> list:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token) -> int:
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> list:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> list:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix=None) -> tuple:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 338 | 1 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokensa).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 22 |
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum element in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi (bring the max to the front)
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix (send the max to position cur - 1)
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
return arr
if __name__ == "__main__":
lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 22 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
    def _compute(self, predictions, references, min_len=1, max_len=4):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
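# A minimal sketch of what `_compute` delegates to (the token lists below are
# hypothetical); `corpus_gleu` is nltk's implementation of the GLEU score:
#   hyp = ["the", "cat", "sat"]
#   ref = ["the", "cat", "sat", "down"]
#   gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp],
#                          min_len=1, max_len=4)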
| 94 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Layer names are different for encoder and decoder."""
    k = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
    o = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
    q = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
    v = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_a = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
        wi_a = (wi_a, params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""])
        wi = wi_a
    else:
        wi = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""]
    wo = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""]
    return wi, wo
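# Note: for v1.1 ("gated-gelu") checkpoints, wi_0 is the gate branch and wi_1 the
# linear branch, i.e. hidden = gelu(x @ wi_0) * (x @ wi_1), followed by hidden @ wo;
# this mirrors T5DenseGatedActDense on the PyTorch side. v1.0 checkpoints have a
# single wi projection instead.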
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict( converted_params , is_encoder_only : bool ):
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']
    return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only ):
    """simple docstring"""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only : bool = False ):
    """simple docstring"""
    config = TaConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config )
    else:
        model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('''Done''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
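    # Example invocation (hypothetical paths; the script file name is illustrative):
    #   python convert_t5x_checkpoint_to_pytorch.py \
    #       --t5x_checkpoint_path /path/to/t5x_checkpoint \
    #       --config_file /path/to/config.json \
    #       --pytorch_dump_path /path/to/output_dir \
    #       --is_encoder_only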
| 94 | 1 |
"""simple docstring"""
import operator as op
UpperCAmelCase : List[Any] = "scaler.pt"
UpperCAmelCase : List[str] = "pytorch_model"
UpperCAmelCase : int = "random_states"
UpperCAmelCase : Tuple = "optimizer"
UpperCAmelCase : Dict = "scheduler"
UpperCAmelCase : Any = "pytorch_model.bin"
UpperCAmelCase : List[Any] = "pytorch_model.bin.index.json"
UpperCAmelCase : Dict = "model.safetensors"
UpperCAmelCase : Any = "model.safetensors.index.json"
UpperCAmelCase : List[str] = "1.10.2"
UpperCAmelCase : Any = "py38"
UpperCAmelCase : str = "4.17.0"
UpperCAmelCase : Optional[Any] = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
UpperCAmelCase : Tuple = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
UpperCAmelCase : List[Any] = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
UpperCAmelCase : List[str] = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
UpperCAmelCase : Dict = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
UpperCAmelCase : str = "2.0.1"
UpperCAmelCase : Dict = ["pdsh", "standard", "openmpi", "mvapich"]
UpperCAmelCase : str = ["default", "reduce-overhead", "max-autotune"]
UpperCAmelCase : Optional[Any] = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCAmelCase : str = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
UpperCAmelCase : Any = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
UpperCAmelCase : List[Any] = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 313 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    '''simple docstring'''
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    "is :"
)
print(z)
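# Sanity check: after the in-place sort, the loaded array must be nondecreasing.
assert all(M[i] <= M[i + 1] for i in range(len(M) - 1)), "array is not sorted"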
| 313 | 1 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        """simple docstring"""
        self.node_position = []

    def get_position(self, vertex):
        """simple docstring"""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """simple docstring"""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """simple docstring"""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """simple docstring"""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[index] = val
            position[index] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """simple docstring"""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """simple docstring"""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
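    # Example session (hypothetical input; vertex 0 is the implicit start vertex):
    #   Enter number of edges: 3
    #   0 1 1
    #   1 2 2
    #   0 2 3
    # Expected minimum spanning tree edges: [(0, 1), (1, 2)]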
| 125 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
    """simple docstring"""

    def __init__(self, data):
        """simple docstring"""
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """simple docstring"""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """simple docstring"""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """simple docstring"""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """simple docstring"""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """simple docstring"""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaHash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
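# Usage sketch (hypothetical input): hash an arbitrary byte string and
# cross-check it against hashlib, exactly as test_sha1_hash does above:
#     digest = SHAaHash(b"abc").final_hash()
#     assert digest == hashlib.sha1(b"abc").hexdigest()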
| 245 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowerCAmelCase : List[str] =logging.getLogger(__name__)
__lowerCAmelCase : Dict =tf.data.AUTOTUNE
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=lowerCAmelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=lowerCAmelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=lowerCAmelCase__ , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=lowerCAmelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=lowerCAmelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=lowerCAmelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=lowerCAmelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=lowerCAmelCase__ , default=2**1_8 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=lowerCAmelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCAmelCase__ , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=lowerCAmelCase__ , default=1e-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=lowerCAmelCase__ , default=1e-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=lowerCAmelCase__ , default=5_1_2 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=lowerCAmelCase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=lowerCAmelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" )
    args = parser.parse_args()
return args
def initialize_tpu( args ):
    '''simple docstring'''
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
            """--gcp_project. When running on a TPU VM, use --tpu_name local.""" )

    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples( file_list ):
    '''simple docstring'''
    num_samples = 0
    for file in file_list:
        filename = file.split("""/""" )[-1]
        sample_count = re.search(R"""-\d+-(\d+)\.tfrecord""" , filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count

    return num_samples
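# For example, a shard named "train-0-25000.tfrecord" (an assumed naming scheme
# that matches the regex above) contributes 25000 samples to the total.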
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=None ) -> List[Any]:
'''simple docstring'''
lowercase = count_samples(lowerCAmelCase__ )
lowercase = tf.data.Dataset.from_tensor_slices(lowerCAmelCase__ )
if shuffle:
lowercase = dataset.shuffle(len(lowerCAmelCase__ ) )
lowercase = tf.data.TFRecordDataset(lowerCAmelCase__ , num_parallel_reads=lowerCAmelCase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase = dataset.apply(tf.data.experimental.assert_cardinality(lowerCAmelCase__ ) )
lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase = dataset.shuffle(args.shuffle_buffer_size )
lowercase = dataset.batch(lowerCAmelCase__ , drop_remainder=lowerCAmelCase__ )
lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
lowercase = dataset.prefetch(lowerCAmelCase__ )
return dataset
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
if not args.no_tpu:
lowercase = initialize_tpu(lowerCAmelCase__ )
lowercase = tf.distribute.TPUStrategy(lowerCAmelCase__ )
else:
lowercase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
lowercase = AutoTokenizer.from_pretrained(args.tokenizer )
lowercase = AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase = tokenizer.vocab_size
lowercase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
lowercase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
lowercase = count_samples(lowerCAmelCase__ )
lowercase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase = steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase = TFAutoModelForMaskedLM.from_config(lowerCAmelCase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase , lowercase = create_optimizer(
num_train_steps=lowerCAmelCase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowerCAmelCase__ , metrics=["""accuracy"""] )
def decode_fn(lowerCAmelCase__ :Any ):
lowercase = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowerCAmelCase__ , lowerCAmelCase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCAmelCase__ , mlm_probability=args.mlm_probability , mlm=lowerCAmelCase__ , return_tensors="""tf""" )
def mask_with_collator(lowerCAmelCase__ :Dict ):
# TF really needs an isin() function
lowercase = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
lowercase , lowercase = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(lowerCAmelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCAmelCase__ , )
return batch
lowercase = args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase = prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase = prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , )
lowercase = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCAmelCase__ ) )
model.fit(
lowerCAmelCase__ , validation_data=lowerCAmelCase__ , epochs=args.num_epochs , callbacks=lowerCAmelCase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
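    # Example launch (hypothetical script name, bucket paths and model ids):
    #   python run_mlm_tpu.py \
    #       --pretrained_model_config roberta-base \
    #       --tokenizer unigram-tokenizer-wikitext \
    #       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
    #       --output_dir ./mlm-model --tpu_name local --bfloat16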
| 32 |
"""simple docstring"""
import enum
import shutil
import sys
__lowerCAmelCase , __lowerCAmelCase : List[str] =shutil.get_terminal_size()
__lowerCAmelCase : Union[str, Any] ={"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class _A ( enum.Enum ):
snake_case__ : Tuple = 0
snake_case__ : List[str] = 1
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]="" ) -> List[Any]:
'''simple docstring'''
sys.stdout.write(str(lowerCAmelCase__ ) + end )
sys.stdout.flush()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any]="" ) -> Optional[Any]:
'''simple docstring'''
forceWrite(f'\u001b[{color}m{content}\u001b[0m' , lowerCAmelCase__ )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
forceWrite("""\r""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def UpperCAmelCase__ ( ) -> int:
'''simple docstring'''
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
| 32 | 1 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase = logging.get_logger(__name__)
def normalize_box( box , width , height ):
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
def apply_tesseract( image , lang , tesseract_config = None ):
    tesseract_config = tesseract_config if tesseract_config is not None else ''''''
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words , left , top , width , height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )

    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor( BaseImageProcessor):
    model_input_names = ['''pixel_values''']

    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , apply_ocr = True , ocr_lang = None , tesseract_config = "" , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        output_size = (size['''height'''], size['''width'''])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , size = None , resample = None , apply_ocr = None , ocr_lang = None , tesseract_config = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )

        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )

        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]

        data = BatchFeature(data={'''pixel_values''': images} , tensor_type=return_tensors )

        if apply_ocr:
            data['''words'''] = words_batch
            data['''boxes'''] = boxes_batch
        return data
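# Usage sketch for the processor above (hypothetical inputs; pytesseract must be
# installed when apply_ocr=True):
#     processor = LayoutLMv2ImageProcessor(apply_ocr=True)
#     encoding = processor(images=document_image, return_tensors="pt")
#     encoding.pixel_values.shape   # (1, 3, 224, 224); channels are flipped to BGR
#     encoding.words, encoding.boxes  # OCR tokens and their normalized boxes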
| 256 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class lowercase :
def __init__( self , snake_case = None , snake_case = None , snake_case=None , snake_case=None ):
if not conversation_id:
snake_case_ = uuid.uuida()
if past_user_inputs is None:
snake_case_ = []
if generated_responses is None:
snake_case_ = []
snake_case_ = conversation_id
snake_case_ = past_user_inputs
snake_case_ = generated_responses
snake_case_ = text
def __eq__( self , snake_case ):
if not isinstance(snake_case , snake_case ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def a ( self , snake_case , snake_case = False ):
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
F'''with: "{text}".''' )
snake_case_ = text
else:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
snake_case_ = text
def a ( self ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
snake_case_ = None
def a ( self , snake_case ):
self.generated_responses.append(snake_case )
def a ( self ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
snake_case_ = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
snake_case_ = 'user' if is_user else 'bot'
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
lowercase_ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowercase ( lowercase_ ):
def __init__( self , *snake_case , **snake_case ):
super().__init__(*snake_case , **snake_case )
if self.tokenizer.pad_token_id is None:
snake_case_ = self.tokenizer.eos_token
def a ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ):
snake_case_ = {}
snake_case_ = {}
snake_case_ = {}
if min_length_for_response is not None:
snake_case_ = min_length_for_response
if minimum_tokens is not None:
snake_case_ = minimum_tokens
if "max_length" in generate_kwargs:
snake_case_ = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
snake_case_ = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(snake_case )
return preprocess_params, forward_params, postprocess_params
def __call__( self , snake_case , snake_case=0 , **snake_case ):
snake_case_ = super().__call__(snake_case , num_workers=snake_case , **snake_case )
if isinstance(snake_case , snake_case ) and len(snake_case ) == 1:
return outputs[0]
return outputs
def a ( self , snake_case , snake_case=32 ):
if not isinstance(snake_case , snake_case ):
raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
snake_case_ = self.tokenizer._build_conversation_input_ids(snake_case )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
snake_case_ = self._legacy_parse_and_tokenize(snake_case )
if self.framework == "pt":
snake_case_ = torch.LongTensor([input_ids] )
elif self.framework == "tf":
snake_case_ = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def a ( self , snake_case , snake_case=10 , **snake_case ):
snake_case_ = generate_kwargs.get('max_length' , self.model.config.max_length )
snake_case_ = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
snake_case_ = max_length - minimum_tokens
snake_case_ = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
snake_case_ = model_inputs['attention_mask'][:, -trim:]
snake_case_ = model_inputs.pop('conversation' )
snake_case_ = max_length
snake_case_ = self.model.generate(**snake_case , **snake_case )
if self.model.config.is_encoder_decoder:
snake_case_ = 1
else:
snake_case_ = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def a ( self , snake_case , snake_case=True ):
snake_case_ = model_outputs['output_ids']
snake_case_ = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , )
snake_case_ = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(snake_case )
return conversation
def a ( self , snake_case ):
snake_case_ = self.tokenizer.eos_token_id
snake_case_ = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) )
if len(snake_case ) > self.tokenizer.model_max_length:
snake_case_ = input_ids[-self.tokenizer.model_max_length :]
return input_ids
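# Typical use of the pipeline above goes through the high-level API; the model
# id below is just one example of a conversational checkpoint:
#     from transformers import pipeline, Conversation
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#     conversation = Conversation("Hi there, how are you?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])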
| 285 | 0 |
import datasets
from .evaluate import evaluate
lowerCAmelCase : List[Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
lowerCAmelCase : Tuple = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
lowerCAmelCase : Union[str, Any] = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}),
},
}) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict)
        return score
| 354 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=30 , lowerCAmelCase__ : Dict=400 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=[0.5, 0.5, 0.5] , lowerCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Tuple=1 / 255 , lowerCAmelCase__ : int=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE_: Optional[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
SCREAMING_SNAKE_CASE_: str = parent
SCREAMING_SNAKE_CASE_: Tuple = batch_size
SCREAMING_SNAKE_CASE_: Tuple = num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] = min_resolution
SCREAMING_SNAKE_CASE_: Tuple = max_resolution
SCREAMING_SNAKE_CASE_: List[Any] = do_resize
SCREAMING_SNAKE_CASE_: Optional[int] = size
SCREAMING_SNAKE_CASE_: Optional[int] = do_normalize
SCREAMING_SNAKE_CASE_: Any = image_mean
SCREAMING_SNAKE_CASE_: Dict = image_std
SCREAMING_SNAKE_CASE_: Tuple = do_rescale
SCREAMING_SNAKE_CASE_: int = rescale_factor
SCREAMING_SNAKE_CASE_: int = do_pad
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int=False):
if not batched:
SCREAMING_SNAKE_CASE_: List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_: List[Any] = int(self.size["shortest_edge"] * h / w)
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE_: Any = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_: Union[str, Any] = int(self.size["shortest_edge"] * w / h)
else:
SCREAMING_SNAKE_CASE_: int = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_: Dict = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE_: int = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
SCREAMING_SNAKE_CASE_: Tuple = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__: item[0])[0]
SCREAMING_SNAKE_CASE_: Optional[Any] = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Any = DeformableDetrImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = DeformableDetrImageProcessingTester(self)
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Dict = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean"))
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std"))
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize"))
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize"))
self.assertTrue(hasattr(lowerCAmelCase__ , "do_rescale"))
self.assertTrue(hasattr(lowerCAmelCase__ , "do_pad"))
self.assertTrue(hasattr(lowerCAmelCase__ , "size"))
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Dict = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333})
self.assertEqual(image_processor.do_pad , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__)
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84})
self.assertEqual(image_processor.do_pad , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
# Initialize image_processing
SCREAMING_SNAKE_CASE_: List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
SCREAMING_SNAKE_CASE_: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image)
# Test not batched input
SCREAMING_SNAKE_CASE_: Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = image_processing(lowerCAmelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self : str):
# Initialize image_processing
SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
SCREAMING_SNAKE_CASE_: List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray)
# Test not batched input
SCREAMING_SNAKE_CASE_: str = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_: Any = image_processing(lowerCAmelCase__ , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
# Initialize image_processing
SCREAMING_SNAKE_CASE_: List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor)
# Test not batched input
SCREAMING_SNAKE_CASE_: Dict = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple):
# prepare image and target
SCREAMING_SNAKE_CASE_: Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r") as f:
SCREAMING_SNAKE_CASE_: str = json.loads(f.read())
SCREAMING_SNAKE_CASE_: Optional[int] = {"image_id": 3_9769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE_: str = DeformableDetrImageProcessor()
SCREAMING_SNAKE_CASE_: Dict = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt")
# verify pixel values
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4))
# verify area
SCREAMING_SNAKE_CASE_: int = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__))
# verify boxes
SCREAMING_SNAKE_CASE_: str = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3))
# verify image_id
SCREAMING_SNAKE_CASE_: str = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__))
# verify is_crowd
SCREAMING_SNAKE_CASE_: int = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__))
# verify class_labels
SCREAMING_SNAKE_CASE_: Tuple = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__))
# verify orig_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__))
# verify size
SCREAMING_SNAKE_CASE_: str = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple):
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE_: Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r") as f:
SCREAMING_SNAKE_CASE_: List[Any] = json.loads(f.read())
SCREAMING_SNAKE_CASE_: Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
SCREAMING_SNAKE_CASE_: int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
# encode them
SCREAMING_SNAKE_CASE_: Any = DeformableDetrImageProcessor(format="coco_panoptic")
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt")
# verify pixel values
SCREAMING_SNAKE_CASE_: Dict = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4))
# verify area
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__))
# verify boxes
SCREAMING_SNAKE_CASE_: List[str] = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3))
# verify image_id
SCREAMING_SNAKE_CASE_: Any = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__))
# verify is_crowd
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__))
# verify class_labels
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__))
# verify masks
SCREAMING_SNAKE_CASE_: Tuple = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__)
# verify orig_size
SCREAMING_SNAKE_CASE_: str = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__))
# verify size
SCREAMING_SNAKE_CASE_: Optional[int] = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__))
| 127 | 0 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def a_ ( lowerCamelCase ):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
lowerCAmelCase__ : List[Any] = '\ntransformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand ( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( lowerCamelCase__ : ArgumentParser ):
UpperCAmelCase__ = parser.add_parser(
            'convert' ,help='CLI tool to convert models from original author checkpoints to Transformers PyTorch checkpoints.' ,)
train_parser.add_argument('--model_type' ,type=lowerCamelCase__ ,required=lowerCamelCase__ ,help='Model\'s type.' )
train_parser.add_argument(
'--tf_checkpoint' ,type=lowerCamelCase__ ,required=lowerCamelCase__ ,help='TensorFlow checkpoint path or folder.' )
train_parser.add_argument(
'--pytorch_dump_output' ,type=lowerCamelCase__ ,required=lowerCamelCase__ ,help='Path to the PyTorch saved model output.' )
train_parser.add_argument('--config' ,type=lowerCamelCase__ ,default='' ,help='Configuration file path or folder.' )
train_parser.add_argument(
'--finetuning_task_name' ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,help='Optional fine-tuning task name if the TF model was a finetuned model.' ,)
train_parser.set_defaults(func=lowerCamelCase__ )
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
UpperCAmelCase__ = self._tf_checkpoint
UpperCAmelCase__ = ''
else:
UpperCAmelCase__ = self._tf_checkpoint
UpperCAmelCase__ = ''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCamelCase__ ,self._config ,self._pytorch_dump_output ,lowerCamelCase__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint ,self._config ,self._pytorch_dump_output ,self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl,"
                " xlnet, xlm, lxmert, rembert]"
            )
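# Example invocation (added for illustration; the paths below are placeholders,
# not files shipped with the library):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_dump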
| 98 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class _a (BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
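# A minimal usage sketch (added for illustration, not part of the original file);
# the image path is a placeholder and the processor runs with its defaults
# (resize shortest edge to 256, center crop to 224x224, rescale, normalize):
#
# from PIL import Image
# processor = _a()
# image = Image.open("example.jpg")
# batch = processor(images=image, return_tensors="pt")
# print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])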
| 131 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
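# A short usage sketch (added for illustration, not part of the original file);
# the values simply echo the defaults defined above:
#
# config = MarkupLMConfig()
# assert config.model_type == "markuplm"
# assert config.max_depth == 50 and config.xpath_unit_hidden_size == 32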
| 369 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
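# A minimal sketch (added for illustration, not part of the original test file) of
# the forward pass exercised above, using the same toy config as
# prepare_init_args_and_inputs_for_common:
#
# model = PriorTransformer(
#     num_attention_heads=2, attention_head_dim=4, num_layers=2,
#     embedding_dim=8, num_embeddings=7, additional_embeddings=4,
# )
# out = model(
#     hidden_states=torch.randn(4, 8), timestep=2,
#     proj_embedding=torch.randn(4, 8), encoder_hidden_states=torch.randn(4, 7, 8),
# )[0]
# assert out.shape == (4, 8)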
| 108 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
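# For illustration (not in the original script): a fairseq key mapped to
# "encoder.layers.0.attention.k_proj" is resolved attribute by attribute, so with
# weight_type="weight" the call effectively performs
#   hf_model.encoder.layers[0].attention.k_proj.weight.data = value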
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
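# For illustration (not in the original script): the layer returned above shares
# its weight tensor with the embedding, the usual weight-tying trick for LM heads:
#
# emb = nn.Embedding(10, 4)
# lm_head = make_linear_from_emb(emb)
# assert torch.equal(lm_head.weight.data, emb.weight.data)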
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq model's weights to the transformers design."""
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=True,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
    parser.add_argument("--add_adapter", default=True, type=bool, help="whether to add model adapter layers")
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1_0_2_4, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=2_5_0_0_0_4, type=int, help='''`decoder_start_token_id` of model config''')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
    )
| 206 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
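# A minimal usage sketch (added for illustration, not part of the original file);
# any NLI checkpoint works, "facebook/bart-large-mnli" is one public example:
#
# from transformers import pipeline
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# result = classifier("I love hiking in the mountains", candidate_labels=["travel", "cooking"])
# print(result["labels"][0], round(result["scores"][0], 3))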
| 194 | 0 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
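# A minimal usage sketch (added for illustration, not part of the original file);
# "facebook/rag-token-base" is one public checkpoint that ships both sub-tokenizers:
#
# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
# batch = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")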
| 40 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
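# A short sketch (added for illustration, not part of the original tests) of the
# backbone variant exercised by create_and_check_backbone above; config values are
# illustrative:
#
# config = ConvNextVaConfig(out_features=["stage2", "stage3", "stage4"])
# backbone = ConvNextVaBackbone(config)
# feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
# print([tuple(f.shape) for f in feature_maps])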
| 40 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads a list of `InputExample`s into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs nothing ([]) when tokenizing just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__lowerCAmelCase = [sequence_a_segment_id] * len(__lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__lowerCAmelCase = [cls_token] + tokens
__lowerCAmelCase = [pad_token_label_id] + label_ids
__lowerCAmelCase = [cls_token_segment_id] + segment_ids
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__lowerCAmelCase = [1 if mask_padding_with_zero else 0] * len(__lowercase )
# Zero-pad up to the sequence length.
__lowerCAmelCase = max_seq_length - len(__lowercase )
if pad_on_left:
__lowerCAmelCase = ([pad_token] * padding_length) + input_ids
__lowerCAmelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__lowerCAmelCase = ([pad_token_segment_id] * padding_length) + segment_ids
__lowerCAmelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(__lowercase ) == max_seq_length
assert len(__lowercase ) == max_seq_length
assert len(__lowercase ) == max_seq_length
assert len(__lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(__lowercase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(__lowercase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(__lowercase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(__lowercase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(__lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCAmelCase = None
features.append(
InputFeatures(
input_ids=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , label_ids=__lowercase ) )
return features
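# Hedged illustration (hypothetical labels and tokens, not part of the original
# module): the loop above keeps the real label id on the *first* sub-token of a
# word and fills the remaining sub-tokens with `pad_token_label_id`, e.g.:
if __name__ == "__main__":
    label_map = {"O": 0, "B-LOC": 1}
    pad_token_label_id = -100
    word_tokens = ["Jack", "##son", "##ville"]  # one word, three sub-tokens
    label_ids = [label_map["B-LOC"]] + [pad_token_label_id] * (len(word_tokens) - 1)
    print(label_ids)  # [1, -100, -100]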
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : List[InputFeatures]
__UpperCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__(self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase=False , __lowercase = Split.train , ):
# Load data features from cache or dataset file
__lowerCAmelCase = os.path.join(
__lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(__lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCAmelCase = cached_features_file + '''.lock'''
with FileLock(__lowercase ):
if os.path.exists(__lowercase ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
__lowerCAmelCase = torch.load(__lowercase )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
__lowerCAmelCase = token_classification_task.read_examples_from_file(__lowercase , __lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCAmelCase = token_classification_task.convert_examples_to_features(
__lowercase , __lowercase , __lowercase , __lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , __lowercase )
def __len__(self ):
return len(self.features )
    def __getitem__(self, i):
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class a__ :
"""simple docstring"""
__UpperCamelCase : List[InputFeatures]
__UpperCamelCase : int = -100
def __init__(self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase=False , __lowercase = Split.train , ):
__lowerCAmelCase = token_classification_task.read_examples_from_file(__lowercase , __lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCAmelCase = token_classification_task.convert_examples_to_features(
__lowercase , __lowercase , __lowercase , __lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCAmelCase = tf.data.Dataset.from_generator(
__lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__lowerCAmelCase = tf.data.Dataset.from_generator(
__lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def _snake_case (self ):
__lowerCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__(self ):
return len(self.features )
    def __getitem__(self, i):
        return self.features[i]
| 174 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline
__UpperCamelCase : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
__UpperCamelCase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
__UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__UpperCamelCase : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _snake_case (self ):
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__lowercase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__lowerCAmelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=32 , )
__lowerCAmelCase = CLIPTextModel(__lowercase )
__lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowercase )
__lowerCAmelCase = CLIPTextModelWithProjection(__lowercase )
__lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowercase )
__lowerCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def _snake_case (self , __lowercase , __lowercase=0 ):
__lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowerCAmelCase = image / 2 + 0.5
if str(__lowercase ).startswith('''mps''' ):
__lowerCAmelCase = torch.manual_seed(__lowercase )
else:
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def _snake_case (self ):
__lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**__lowercase )
__lowerCAmelCase = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = sd_pipe(**__lowercase ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case (self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _snake_case (self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _snake_case (self ):
pass
def _snake_case (self ):
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**__lowercase )
__lowerCAmelCase = sd_pipe.to(__lowercase )
__lowerCAmelCase = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
# forward without prompt embeds
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = 3 * ['''this is a negative prompt''']
__lowerCAmelCase = negative_prompt
__lowerCAmelCase = 3 * [inputs['''prompt''']]
__lowerCAmelCase = sd_pipe(**__lowercase )
__lowerCAmelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = 3 * ['''this is a negative prompt''']
__lowerCAmelCase = 3 * [inputs.pop('''prompt''' )]
        (
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
        ) = sd_pipe.encode_prompt(__lowercase, negative_prompt=__lowercase)
__lowerCAmelCase = sd_pipe(
**__lowercase , prompt_embeds=__lowercase , negative_prompt_embeds=__lowercase , pooled_prompt_embeds=__lowercase , negative_pooled_prompt_embeds=__lowercase , )
__lowerCAmelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case (self , __lowercase , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=0 ):
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = np.random.RandomState(__lowercase ).standard_normal((1, 4, 64, 64) )
__lowerCAmelCase = torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase )
__lowerCAmelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case (self ):
__lowerCAmelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs(__lowercase )
__lowerCAmelCase = pipe(**__lowercase ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCAmelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 174 | 1 |
def kth_permutation(k: int, n: int) -> list[int]:
    '''simple docstring'''
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
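    # Hedged extra example (illustrative values): with n=3 there are 3! = 6
    # orderings of [0, 1, 2]; the 0-indexed 4th one is [2, 0, 1].
    print(kth_permutation(4, 3))  # [2, 0, 1]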
| 208 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        '''simple docstring'''
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        '''simple docstring'''
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
| 208 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        '''simple docstring'''
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        '''simple docstring'''
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        '''simple docstring'''
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        '''simple docstring'''
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        '''simple docstring'''
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        '''simple docstring'''
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 109 |
'''simple docstring'''
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 139 | 0 |
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    '''simple docstring'''
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
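    # Hedged extra check (reference root value computed by hand): the real root
    # of x^3 - 2x - 5 is ~2.0945514815, so the bisection result should match it
    # to well within the 1e-7 interval tolerance used above.
    print(abs(bisection(f, 1, 1000) - 2.0945514815) < 1e-4)  # True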
| 169 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
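# Hedged usage sketch (runs only inside the datasets package because of the
# relative imports above; the 2-class label set is illustrative):
if __name__ == "__main__":
    template = AudioClassification()
    features = Features({"audio": Audio(), "labels": ClassLabel(names=["neg", "pos"])})
    aligned = template.align_with_features(features)
    print(aligned.label_schema["labels"].names)  # ['neg', 'pos']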
| 169 | 1 |
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
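# Hedged CLI sketch (script name and flag values are illustrative; the flags
# map to TensorFlowBenchmarkArguments fields parsed by HfArgumentParser above):
#   python run_benchmark_tf.py --models bert-base-uncased \
#       --batch_sizes 8 --sequence_lengths 128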
| 335 |
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
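    # Hedged extra example (illustrative grid, 1 = blocked): with the centre
    # cell blocked there are exactly two simple paths around the ring.
    example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(example_grid, 0, 0, set()))  # 2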
| 335 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class a(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        '''simple docstring'''
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        '''simple docstring'''
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
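# Hedged usage sketch (runs only inside the transformers package because of the
# relative imports above; the input shape is illustrative): a 70x100 image is
# floored to 64x96, the nearest multiples of size_divisor=32.
if __name__ == "__main__":
    import numpy as np

    processor = a(size_divisor=32)
    out = processor.preprocess(np.zeros((70, 100, 3), dtype=np.uint8))
    print(out["pixel_values"][0].shape)  # (3, 64, 96)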
| 189 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class a ( UpperCAmelCase ):
_lowercase = ["image_processor", "tokenizer"]
_lowercase = "OwlViTImageProcessor"
_lowercase = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , A_=None , A_=None , **A_ ):
'''simple docstring'''
_UpperCAmelCase : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , A_ , )
_UpperCAmelCase : Union[str, Any] = kwargs.pop("feature_extractor" )
_UpperCAmelCase : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(A_ , A_ )
def __call__( self , A_=None , A_=None , A_=None , A_="max_length" , A_="np" , **A_ ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(A_ , A_ ) or (isinstance(A_ , A_ ) and not isinstance(text[0] , A_ )):
_UpperCAmelCase : Optional[int] = [self.tokenizer(A_ , padding=A_ , return_tensors=A_ , **A_ )]
elif isinstance(A_ , A_ ) and isinstance(text[0] , A_ ):
_UpperCAmelCase : Optional[int] = []
# Maximum number of queries across batch
_UpperCAmelCase : Optional[Any] = max([len(A_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A_ ) != max_num_queries:
_UpperCAmelCase : Optional[int] = t + [" "] * (max_num_queries - len(A_ ))
_UpperCAmelCase : str = self.tokenizer(A_ , padding=A_ , return_tensors=A_ , **A_ )
encodings.append(A_ )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_UpperCAmelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCAmelCase : Tuple = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_UpperCAmelCase : Optional[Any] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCAmelCase : str = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_UpperCAmelCase : str = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_UpperCAmelCase : Dict = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_UpperCAmelCase : Union[str, Any] = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCAmelCase : Optional[int] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_UpperCAmelCase : Optional[int] = BatchEncoding()
_UpperCAmelCase : str = input_ids
_UpperCAmelCase : Optional[Any] = attention_mask
if query_images is not None:
_UpperCAmelCase : int = BatchEncoding()
_UpperCAmelCase : str = self.image_processor(
A_ , return_tensors=A_ , **A_ ).pixel_values
_UpperCAmelCase : Optional[Any] = query_pixel_values
if images is not None:
_UpperCAmelCase : int = self.image_processor(A_ , return_tensors=A_ , **A_ )
if text is not None and images is not None:
_UpperCAmelCase : Optional[int] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_UpperCAmelCase : Any = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ) , tensor_type=A_ )
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
return self.image_processor.post_process(*A_ , **A_ )
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*A_ , **A_ )
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*A_ , **A_ )
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_ , **A_ )
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
return self.tokenizer.decode(*A_ , **A_ )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , A_ , )
return self.image_processor_class
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , A_ , )
return self.image_processor
| 189 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = botoa.client("iam" )
SCREAMING_SNAKE_CASE_: Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=lowercase_ , AssumeRolePolicyDocument=json.dumps(lowercase_ , indent=2 ) )
SCREAMING_SNAKE_CASE_: Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=lowercase_ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(lowercase_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = botoa.client("iam" )
return iam_client.get_role(RoleName=lowercase_ )["Role"]["Arn"]
def A_ ( ):
SCREAMING_SNAKE_CASE_: Union[str, Any] = _ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , lowercase_ , )
SCREAMING_SNAKE_CASE_: str = None
if credentials_configuration == 0:
SCREAMING_SNAKE_CASE_: Any = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
SCREAMING_SNAKE_CASE_: Any = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
SCREAMING_SNAKE_CASE_: Optional[Any] = _ask_field("AWS Access Key ID: " )
SCREAMING_SNAKE_CASE_: Any = aws_access_key_id
SCREAMING_SNAKE_CASE_: Optional[Any] = _ask_field("AWS Secret Access Key: " )
SCREAMING_SNAKE_CASE_: List[Any] = aws_secret_access_key
SCREAMING_SNAKE_CASE_: Dict = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
SCREAMING_SNAKE_CASE_: Union[str, Any] = aws_region
SCREAMING_SNAKE_CASE_: Tuple = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , lowercase_ , )
if role_management == 0:
SCREAMING_SNAKE_CASE_: str = _ask_field("Enter your IAM role name: " )
else:
SCREAMING_SNAKE_CASE_: Tuple = """accelerate_sagemaker_execution_role"""
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(lowercase_ )
SCREAMING_SNAKE_CASE_: Any = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase_ , error_message="Please enter yes or no." , )
SCREAMING_SNAKE_CASE_: Dict = None
if is_custom_docker_image:
SCREAMING_SNAKE_CASE_: Any = _ask_field("Enter your Docker image: " , lambda _UpperCAmelCase : str(lowercase_ ).lower() )
SCREAMING_SNAKE_CASE_: List[str] = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase_ , error_message="Please enter yes or no." , )
SCREAMING_SNAKE_CASE_: Dict = None
if is_sagemaker_inputs_enabled:
SCREAMING_SNAKE_CASE_: int = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda _UpperCAmelCase : str(lowercase_ ).lower() , )
SCREAMING_SNAKE_CASE_: Optional[Any] = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase_ , error_message="Please enter yes or no." , )
SCREAMING_SNAKE_CASE_: Tuple = None
if is_sagemaker_metrics_enabled:
SCREAMING_SNAKE_CASE_: Optional[Any] = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda _UpperCAmelCase : str(lowercase_ ).lower() , )
SCREAMING_SNAKE_CASE_: List[str] = _ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
SCREAMING_SNAKE_CASE_: Any = {}
SCREAMING_SNAKE_CASE_: Union[str, Any] = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=lowercase_ , error_message="Please enter yes or no." , )
if use_dynamo:
SCREAMING_SNAKE_CASE_: Optional[int] = """dynamo_"""
SCREAMING_SNAKE_CASE_: Optional[Any] = _ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
SCREAMING_SNAKE_CASE_: Any = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase_ , error_message="Please enter yes or no." , )
if use_custom_options:
SCREAMING_SNAKE_CASE_: Union[str, Any] = _ask_options(
"Which mode do you want to use?" , lowercase_ , lambda _UpperCAmelCase : TORCH_DYNAMO_MODES[int(lowercase_ )] , default="default" , )
SCREAMING_SNAKE_CASE_: Optional[Any] = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase_ , error_message="Please enter yes or no." , )
SCREAMING_SNAKE_CASE_: Dict = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase_ , error_message="Please enter yes or no." , )
SCREAMING_SNAKE_CASE_: Union[str, Any] = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
SCREAMING_SNAKE_CASE_: Tuple = _ask_options(
lowercase_ , lowercase_ , lambda _UpperCAmelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowercase_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
SCREAMING_SNAKE_CASE_: List[str] = _ask_field(lowercase_ , lambda _UpperCAmelCase : str(lowercase_ ).lower() , default="ml.p3.2xlarge" )
SCREAMING_SNAKE_CASE_: Any = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
SCREAMING_SNAKE_CASE_: List[Any] = _ask_field(
"How many machines do you want use? [1]: " , lowercase_ , default=1 , )
SCREAMING_SNAKE_CASE_: List[str] = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=lowercase_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowercase_ , use_cpu=lowercase_ , dynamo_config=lowercase_ , eca_instance_type=lowercase_ , profile=lowercase_ , region=lowercase_ , iam_role_name=lowercase_ , mixed_precision=lowercase_ , num_machines=lowercase_ , sagemaker_inputs_file=lowercase_ , sagemaker_metrics_file=lowercase_ , )
| 13 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}"""


def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border=\"1\" class=\"dataframe\">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"""      <th>{i}</th>\n"""
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"""{elt:.6f}""" if isinstance(elt, float) else str(elt)
            html_code += f"""      <td>{elt}</td>\n"""
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
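# Hedged mini-demo (illustrative rows): render a formatted duration and a
# two-column log table with the helpers above.
if __name__ == "__main__":
    print(format_time(3661))  # '1:01:01'
    print(text_to_html_table([["Step", "Training Loss"], [10, 1.234567]]))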
class _a :
'''simple docstring'''
UpperCAmelCase__: str = 5
UpperCAmelCase__: int = 0.2
def __init__( self , A__ , A__ = None , A__ = True , A__ = None , A__ = 300 , ):
A__ : Optional[int] = total
A__ : Tuple = """""" if prefix is None else prefix
A__ : str = leave
A__ : str = parent
A__ : int = width
A__ : Dict = None
A__ : List[str] = None
A__ : Optional[int] = None
def __A ( self , A__ , A__ = False , A__ = None ):
A__ : Tuple = value
if comment is not None:
A__ : Any = comment
if self.last_value is None:
A__ : int = time.time()
A__ : Dict = value
A__ : int = None
A__ : int = self.warmup
A__ : str = 1
self.update_bar(A__ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
A__ : Any = time.time()
A__ : str = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
A__ : Dict = self.elapsed_time / (value - self.start_value)
else:
A__ : List[str] = None
if value >= self.total:
A__ : Optional[Any] = self.total
A__ : List[Any] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
A__ : List[Any] = self.average_time_per_item * (self.total - value)
self.update_bar(A__ )
A__ : Any = value
A__ : List[str] = current_time
if self.average_time_per_item is None:
A__ : str = 1
else:
A__ : Optional[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def __A ( self , A__ , A__=None ):
A__ : Tuple = """ """ * (len(str(self.total ) ) - len(str(A__ ) )) + str(A__ )
if self.elapsed_time is None:
A__ : Union[str, Any] = F"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
A__ : Tuple = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
A__ : Optional[int] = (
F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
F""" {format_time(self.predicted_remaining )}"""
)
self.label += F""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F""", {self.comment}]"""
self.display()
def __A ( self ):
A__ : str = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
A__ : str = disp.display(disp.HTML(self.html_code ) , display_id=A__ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __A ( self ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("""""" ) )
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self , A__ , A__=None ):
super().__init__(A__ )
A__ : Optional[Any] = None if column_names is None else [column_names]
A__ : Optional[Any] = None
def __A ( self ):
A__ : List[str] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
A__ : Optional[int] = disp.display(disp.HTML(self.html_code ) , display_id=A__ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __A ( self , A__ ):
if self.inner_table is None:
A__ : List[str] = [list(values.keys() ), list(values.values() )]
else:
A__ : Optional[Any] = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(A__ )
A__ : Any = columns
self.inner_table.append([values[c] for c in columns] )
def __A ( self , A__ , A__=None , A__=300 ):
A__ : Optional[Any] = NotebookProgressBar(A__ , prefix=A__ , parent=self , width=A__ )
return self.child_bar
def __A ( self ):
A__ : List[str] = None
self.display()
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self ):
A__ : int = None
A__ : List[str] = None
A__ : Union[str, Any] = False
def __A ( self , A__ , A__ , A__ , **A__ ):
A__ : List[str] = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
A__ : Dict = 0
A__ : Tuple = 0
A__ : Optional[int] = [self.first_column] + ["""Training Loss"""]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("""Validation Loss""" )
A__ : Union[str, Any] = NotebookTrainingTracker(state.max_steps , A__ )
def __A ( self , A__ , A__ , A__ , **A__ ):
A__ : Any = int(state.epoch ) if int(state.epoch ) == state.epoch else F"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 , comment=F"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
A__ : str = False
def __A ( self , A__ , A__ , A__ , A__=None , **A__ ):
if not has_length(A__ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
A__ : Union[str, Any] = self.training_tracker.add_child(len(A__ ) )
else:
A__ : Tuple = NotebookProgressBar(len(A__ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def __A ( self , A__ , A__ , A__ , **A__ ):
if self.prediction_bar is not None:
self.prediction_bar.close()
A__ : List[str] = None
def __A ( self , A__ , A__ , A__ , A__=None , **A__ ):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
A__ : Dict = {"""Training Loss""": logs["""loss"""]}
# First column is necessarily Step sine we're not in epoch eval strategy
A__ : List[Any] = state.global_step
self.training_tracker.write_line(A__ )
def __A ( self , A__ , A__ , A__ , A__=None , **A__ ):
if self.training_tracker is not None:
A__ : Tuple = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
for log in reversed(state.log_history ):
if "loss" in log:
A__ : Dict = log["""loss"""]
break
if self.first_column == "Epoch":
A__ : List[Any] = int(state.epoch )
else:
A__ : Optional[Any] = state.global_step
A__ : Optional[Any] = """eval"""
for k in metrics:
if k.endswith("""_loss""" ):
A__ : Optional[int] = re.sub(r"""\_loss$""" , """""" , A__ )
A__ : int = metrics.pop("""total_flos""" , A__ )
A__ : int = metrics.pop("""epoch""" , A__ )
A__ : Optional[int] = metrics.pop(F"""{metric_key_prefix}_runtime""" , A__ )
A__ : Any = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" , A__ )
A__ : List[Any] = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" , A__ )
A__ : Optional[Any] = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" , A__ )
for k, v in metrics.items():
if k == F"""{metric_key_prefix}_loss""":
A__ : Any = v
else:
A__ : Optional[Any] = k.split("""_""" )
A__ : Any = """ """.join([part.capitalize() for part in splits[1:]] )
A__ : List[str] = v
self.training_tracker.write_line(A__ )
self.training_tracker.remove_child()
A__ : Dict = None
# Evaluation takes a long time so we should force the next update.
A__ : Union[str, Any] = True
def __A ( self , A__ , A__ , A__ , **A__ ):
self.training_tracker.update(
state.global_step , comment=F"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=A__ )
A__ : Optional[int] = None
| 192 | 0 |
def simplify(current_set: list[list]) -> list[list]:
    '''simple docstring'''
    # Divide each row by the magnitude of its first term
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    '''simple docstring'''
    if len(equations) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('solve_simultaneous() requires lists of integers')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
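    # Hedged extra check (illustrative system): x + y = 3 and x - y = 1
    # give x = 2, y = 1.
    print(solve_simultaneous([[1, 1, 3], [1, -1, 1]]))  # [2.0, 1.0]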
| 304 |
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        '''simple docstring'''
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 100_000:
        print(f'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 10
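    # Hedged closed-form check (value derived by hand): the exact area between
    # |x^3 + x^2| and the x axis over [-5, 5] is 938/3 ~ 312.67, which the
    # estimates above approach as the step count grows.
    print(f"exact area: {938 / 3}")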
| 304 | 1 |
import numpy as np
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1E-12, __lowerCamelCase = 1_00, ):
assert np.shape(__lowerCamelCase )[0] == np.shape(__lowerCamelCase )[1]
# Ensure proper dimensionality.
assert np.shape(__lowerCamelCase )[0] == np.shape(__lowerCamelCase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(__lowerCamelCase ) == np.iscomplexobj(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = np.iscomplexobj(__lowerCamelCase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(__lowerCamelCase, input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1E12
while not convergence:
# Multiple matrix by the vector.
SCREAMING_SNAKE_CASE_ = np.dot(__lowerCamelCase, __lowerCamelCase )
# Normalize the resulting output vector.
SCREAMING_SNAKE_CASE_ = w / np.linalg.norm(__lowerCamelCase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
SCREAMING_SNAKE_CASE_ = vector.conj().T if is_complex else vector.T
SCREAMING_SNAKE_CASE_ = np.dot(__lowerCamelCase, np.dot(__lowerCamelCase, __lowerCamelCase ) )
# Check convergence.
SCREAMING_SNAKE_CASE_ = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = lambda_
if is_complex:
SCREAMING_SNAKE_CASE_ = np.real(lambda_ )
return lambda_, vector
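# Usage sketch (an assumption: the masked routine above is the classic
# power_iteration(input_matrix, vector), which is how the test driver below
# refers to it). For the symmetric matrix [[2, 1], [1, 2]] the dominant
# eigenvalue is 3:
# >>> eig, vec = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))  # doctest: +SKIP
# >>> round(float(eig), 5)  # doctest: +SKIP
# 3.0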
def A__ ( ):
SCREAMING_SNAKE_CASE_ = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
SCREAMING_SNAKE_CASE_ = np.array([41, 4, 20] )
    SCREAMING_SNAKE_CASE_ = real_input_matrix.astype(np.complex128 )
SCREAMING_SNAKE_CASE_ = np.triu(1j * complex_input_matrix, 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
    SCREAMING_SNAKE_CASE_ = np.array([41, 4, 20] ).astype(np.complex128 )
for problem_type in ["real", "complex"]:
if problem_type == "real":
SCREAMING_SNAKE_CASE_ = real_input_matrix
SCREAMING_SNAKE_CASE_ = real_vector
elif problem_type == "complex":
SCREAMING_SNAKE_CASE_ = complex_input_matrix
SCREAMING_SNAKE_CASE_ = complex_vector
# Our implementation.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = power_iteration(__lowerCamelCase, __lowerCamelCase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = np.linalg.eigh(__lowerCamelCase )
# Last eigenvalue is the maximum one.
SCREAMING_SNAKE_CASE_ = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
SCREAMING_SNAKE_CASE_ = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(__lowerCamelCase ) - np.abs(__lowerCamelCase ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 299 |
import math
import random
def A__ ( __lowerCamelCase, __lowerCamelCase = False ):
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
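# Behaviour sketch (hedged: the definition name is masked, but the trainer
# below calls it sigmoid_function). With deriv=True the helper expects the
# already-sigmoided value, i.e. it computes sigma'(x) as sigma(x) * (1 - sigma(x)):
# >>> sigmoid_function(0.0)        # doctest: +SKIP
# 0.5
# >>> sigmoid_function(0.5, True)  # doctest: +SKIP
# 0.25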
# Initial Value
__UpperCAmelCase = 0.02
def A__ ( __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = float(2 * (random.randint(1, 1_00 )) - 1 )
for _ in range(__lowerCamelCase ):
# Forward propagation
SCREAMING_SNAKE_CASE_ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
SCREAMING_SNAKE_CASE_ = (expected / 1_00) - layer_a
# Error delta
SCREAMING_SNAKE_CASE_ = layer_1_error * sigmoid_function(__lowerCamelCase, __lowerCamelCase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
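# Usage sketch (hedged: the definition name is masked, but the __main__ block
# below calls it forward_propagation): with enough propagations the trained
# output converges near the expected value.
# >>> res = forward_propagation(32, 450_000)  # doctest: +SKIP
# >>> 31 < res < 33                           # doctest: +SKIP
# True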
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input("Expected value: "))
__UpperCAmelCase = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 299 | 1 |
import math
class lowercase_ :
def __init__( self , lowercase_=0 ): # a graph with Node 0,1,...,N-1
_snake_case : Any = n
_snake_case : Union[str, Any] = [
[math.inf for j in range(0 , _snake_case )] for i in range(0 , _snake_case )
] # adjacency matrix for weight
_snake_case : int = [
[math.inf for j in range(0 , _snake_case )] for i in range(0 , _snake_case )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ ):
_snake_case : Any = w
def UpperCamelCase ( self ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_snake_case : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
return self.dp[u][v]
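# Tiny sanity sketch (assuming the masked method bodies implement the intended
# add_edge / floyd_warshall / show_min behaviour used by the driver below).
# The triple loop relaxes every pair through every intermediate vertex, so
# all-pairs shortest paths cost O(n**3) time and O(n**2) space.
# >>> g = Graph(2)          # doctest: +SKIP
# >>> g.add_edge(0, 1, 5)   # doctest: +SKIP
# >>> g.floyd_warshall()    # doctest: +SKIP
# >>> g.show_min(0, 1)      # doctest: +SKIP
# 5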
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3) | 361 | from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
__SCREAMING_SNAKE_CASE : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__SCREAMING_SNAKE_CASE : list[int] = [ord(letter) for letter in string.ascii_lowercase]
__SCREAMING_SNAKE_CASE : set[int] = {ord(char) for char in VALID_CHARS}
__SCREAMING_SNAKE_CASE : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def snake_case (__lowercase , __lowercase ) -> str | None:
'''simple docstring'''
_snake_case : str = ""
_snake_case : int
_snake_case : int
_snake_case : int
for keychar, cipherchar in zip(cycle(__lowercase ) , __lowercase ):
_snake_case : str = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__lowercase )
return decoded
def snake_case (__lowercase ) -> list[str]:
'''simple docstring'''
_snake_case : list[str] = []
for key in product(__lowercase , repeat=3 ):
_snake_case : Union[str, Any] = try_key(__lowercase , __lowercase )
if encoded is not None:
possibles.append(__lowercase )
return possibles
def snake_case (__lowercase , __lowercase ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def snake_case (__lowercase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
_snake_case : list[int]
_snake_case : list[str]
_snake_case : str
_snake_case : str
_snake_case : str = Path(__lowercase ).parent.joinpath(__lowercase ).read_text(encoding="utf-8" )
_snake_case : Dict = [int(__lowercase ) for number in data.strip().split("," )]
_snake_case : Tuple = filter_valid_chars(__lowercase )
for common_word in COMMON_WORDS:
_snake_case : Optional[int] = filter_common_word(__lowercase , __lowercase )
if len(__lowercase ) == 1:
break
_snake_case : int = possibles[0]
return sum(ord(__lowercase ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''') | 284 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> str | Literal[False]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = list(_UpperCAmelCase )
_UpperCAmelCase : Dict = list(_UpperCAmelCase )
_UpperCAmelCase : List[str] = 0
for i in range(len(_UpperCAmelCase ) ):
if lista[i] != lista[i]:
count += 1
_UpperCAmelCase : Tuple = "_"
if count > 1:
return False
else:
return "".join(_UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : list[str] ) -> list[str]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = []
while True:
_UpperCAmelCase : List[str] = ["$"] * len(_UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = []
for i in range(len(_UpperCAmelCase ) ):
for j in range(i + 1 , len(_UpperCAmelCase ) ):
_UpperCAmelCase : str = compare_string(binary[i] , binary[j] )
if k is False:
_UpperCAmelCase : Optional[Any] = "*"
_UpperCAmelCase : Optional[Any] = "*"
temp.append("X" )
for i in range(len(_UpperCAmelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_UpperCAmelCase ) == 0:
return pi
_UpperCAmelCase : Dict = list(set(_UpperCAmelCase ) )
def UpperCamelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : Sequence[float] ) -> list[str]:
"""simple docstring"""
_UpperCAmelCase : int = []
for minterm in minterms:
_UpperCAmelCase : str = ""
for _ in range(_UpperCAmelCase ):
_UpperCAmelCase : List[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(_UpperCAmelCase )
return temp
def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int ) -> bool:
"""simple docstring"""
_UpperCAmelCase : Tuple = list(_UpperCAmelCase )
_UpperCAmelCase : Dict = list(_UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = 0
for i in range(len(_UpperCAmelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase_ ( _UpperCAmelCase : list[list[int]] , _UpperCAmelCase : list[str] ) -> list[str]:
"""simple docstring"""
_UpperCAmelCase : str = []
_UpperCAmelCase : str = [0] * len(_UpperCAmelCase )
for i in range(len(chart[0] ) ):
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : Dict = -1
for j in range(len(_UpperCAmelCase ) ):
if chart[j][i] == 1:
count += 1
_UpperCAmelCase : int = j
if count == 1:
_UpperCAmelCase : int = 1
for i in range(len(_UpperCAmelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_UpperCAmelCase ) ):
_UpperCAmelCase : Dict = 0
temp.append(prime_implicants[i] )
while True:
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Tuple = 0
for i in range(len(_UpperCAmelCase ) ):
_UpperCAmelCase : List[Any] = chart[i].count(1 )
if count_n > max_n:
_UpperCAmelCase : Optional[int] = count_n
_UpperCAmelCase : Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_UpperCAmelCase ) ):
_UpperCAmelCase : Tuple = 0
def UpperCamelCase_ ( _UpperCAmelCase : list[str] , _UpperCAmelCase : list[str] ) -> list[list[int]]:
"""simple docstring"""
_UpperCAmelCase : int = [[0 for x in range(len(_UpperCAmelCase ) )] for x in range(len(_UpperCAmelCase ) )]
for i in range(len(_UpperCAmelCase ) ):
_UpperCAmelCase : Optional[int] = prime_implicants[i].count("_" )
for j in range(len(_UpperCAmelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _UpperCAmelCase ):
_UpperCAmelCase : List[Any] = 1
return chart
def UpperCamelCase_ ( ) -> None:
"""simple docstring"""
_UpperCAmelCase : Dict = int(input("Enter the no. of variables\n" ) )
_UpperCAmelCase : Dict = [
float(_UpperCAmelCase )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
_UpperCAmelCase : List[str] = decimal_to_binary(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase : int = check(_UpperCAmelCase )
print("Prime Implicants are:" )
print(_UpperCAmelCase )
_UpperCAmelCase : Tuple = prime_implicant_chart(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase : Tuple = selection(_UpperCAmelCase , _UpperCAmelCase )
print("Essential Prime Implicants are:" )
print(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 31 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , A : Dict , A : Optional[int]=7 , A : Tuple=3 , A : Optional[Any]=10 , A : int=18 , A : Dict=30 , A : List[str]=400 , A : int=True , A : Optional[Any]=None , A : Optional[Any]=True , A : List[Any]=[0.5, 0.5, 0.5] , A : List[str]=[0.5, 0.5, 0.5] , A : Optional[int]=None , ):
_UpperCAmelCase : Dict = size if size is not None else {"shortest_edge": 18}
_UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
_UpperCAmelCase : Tuple = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : Optional[int] = num_channels
_UpperCAmelCase : Optional[Any] = num_frames
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : Dict = min_resolution
_UpperCAmelCase : Any = max_resolution
_UpperCAmelCase : Optional[int] = do_resize
_UpperCAmelCase : str = size
_UpperCAmelCase : List[Any] = do_normalize
_UpperCAmelCase : Any = image_mean
_UpperCAmelCase : Tuple = image_std
_UpperCAmelCase : Any = crop_size
def _A ( self : List[Any] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCamelCase_ (snake_case__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: Dict = VivitImageProcessor if is_vision_available() else None
def _A ( self : int ):
_UpperCAmelCase : Tuple = VivitImageProcessingTester(self )
@property
def _A ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , "image_mean" ) )
self.assertTrue(hasattr(A , "image_std" ) )
self.assertTrue(hasattr(A , "do_normalize" ) )
self.assertTrue(hasattr(A , "do_resize" ) )
self.assertTrue(hasattr(A , "do_center_crop" ) )
self.assertTrue(hasattr(A , "size" ) )
def _A ( self : List[Any] ):
_UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
_UpperCAmelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _A ( self : Tuple ):
# Initialize image_processing
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_UpperCAmelCase : Any = prepare_video_inputs(self.image_processor_tester , equal_resolution=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_UpperCAmelCase : str = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : List[Any] ):
# Initialize image_processing
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_UpperCAmelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : Optional[int] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : List[Any] ):
# Initialize image_processing
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 31 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCamelCase__ ( snake_case_ : dict ) -> tuple:
return (data["data"], data["target"])
def lowerCamelCase__ ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ) -> XGBClassifier:
__snake_case = XGBClassifier()
classifier.fit(snake_case_ , snake_case_ )
return classifier
def lowerCamelCase__ ( ) -> None:
__snake_case = load_iris()
__snake_case , __snake_case = data_handling(snake_case_ )
__snake_case , __snake_case , __snake_case , __snake_case = train_test_split(
snake_case_ , snake_case_ , test_size=0.25 )
__snake_case = iris['''target_names''']
# Create an XGBoost Classifier from the training data
__snake_case = xgboost(snake_case_ , snake_case_ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
snake_case_ , snake_case_ , snake_case_ , display_labels=snake_case_ , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 238 |
from __future__ import annotations
snake_case_ = [True] * 1000001
snake_case_ = 2
while i * i <= 1000000:
if seive[i]:
for j in range(i * i, 1000001, i):
snake_case_ = False
i += 1
def lowerCamelCase__ ( snake_case_ : int ) -> bool:
return seive[n]
def lowerCamelCase__ ( snake_case_ : int ) -> bool:
return any(digit in '''02468''' for digit in str(snake_case_ ) )
def lowerCamelCase__ ( snake_case_ : int = 100_0000 ) -> list[int]:
__snake_case = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(snake_case_ ) and not contains_an_even_digit(snake_case_ ):
__snake_case = str(snake_case_ )
__snake_case = [int(str_num[j:] + str_num[:j] ) for j in range(len(snake_case_ ) )]
if all(is_prime(snake_case_ ) for i in list_nums ):
result.append(snake_case_ )
return result
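# Quick check sketch (hedged: the driver below names this function
# find_circular_primes): 197 is a circular prime because 197, 971 and 719 are
# all prime, so it should appear for any limit above 197.
# >>> 197 in find_circular_primes(1_000)  # doctest: +SKIP
# True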
def lowerCamelCase__ ( ) -> int:
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'{len(find_circular_primes()) = }')
| 238 | 1 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_A : Optional[Any] = logging.get_logger(__name__)
_A : Optional[Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
_A : Any = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __magic_name__ ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Union[str, Any]:
for attribute in key.split("." ):
lowercase : Tuple = getattr(__snake_case , __snake_case )
if weight_type is not None:
lowercase : str = getattr(__snake_case , __snake_case ).shape
else:
lowercase : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowercase : Optional[Any] = value
elif weight_type == "weight_g":
lowercase : Optional[Any] = value
elif weight_type == "weight_v":
lowercase : int = value
elif weight_type == "bias":
lowercase : int = value
else:
lowercase : Tuple = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __magic_name__ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Optional[int]:
lowercase : Optional[int] = []
lowercase : Optional[int] = fairseq_model.state_dict()
lowercase : Dict = hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowercase : int = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == "group" , )
lowercase : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowercase : List[str] = True
if "*" in mapped_key:
lowercase : Optional[Any] = name.split(__snake_case )[0].split("." )[-2]
lowercase : Dict = mapped_key.replace("*" , __snake_case )
if "weight_g" in name:
lowercase : Optional[Any] = "weight_g"
elif "weight_v" in name:
lowercase : List[str] = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
lowercase : str = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase : int = "weight"
else:
lowercase : Any = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __magic_name__ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : int , __snake_case : int , __snake_case : Dict ) -> Tuple:
lowercase : Union[str, Any] = full_name.split("conv_layers." )[-1]
lowercase : str = name.split("." )
lowercase : Optional[int] = int(items[0] )
lowercase : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowercase : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowercase : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowercase : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowercase : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def __magic_name__ ( __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Any=None ) -> Any:
# load the pre-trained checkpoints
lowercase : Union[str, Any] = torch.load(__snake_case )
lowercase : Union[str, Any] = WavLMConfigOrig(checkpoint["cfg"] )
lowercase : List[Any] = WavLMOrig(__snake_case )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
lowercase : List[str] = WavLMConfig.from_pretrained(__snake_case )
else:
lowercase : List[str] = WavLMConfig()
lowercase : Union[str, Any] = WavLMModel(__snake_case )
recursively_load_weights(__snake_case , __snake_case )
hf_wavlm.save_pretrained(__snake_case )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
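# Invocation sketch (added for illustration; the script name and paths below
# are placeholders, not from the original):
# python convert_wavlm_original_checkpoint.py \
#     --checkpoint_path ./WavLM-Base.pt \
#     --pytorch_dump_folder_path ./wavlm-base-converted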
| 202 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_A : int = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class a__ ( unittest.TestCase, a_ ):
def __magic_name__ ( self ):
lowercase : Tuple = load_tool("text-question-answering" )
self.tool.setup()
lowercase : Dict = load_tool("text-question-answering" , remote=_a )
def __magic_name__ ( self ):
lowercase : str = self.tool(_a , "What did Hugging Face do in April 2021?" )
self.assertEqual(_a , "launched the BigScience Research Workshop" )
def __magic_name__ ( self ):
lowercase : Union[str, Any] = self.remote_tool(_a , "What did Hugging Face do in April 2021?" )
self.assertEqual(_a , "launched the BigScience Research Workshop" )
def __magic_name__ ( self ):
lowercase : int = self.tool(text=_a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(_a , "launched the BigScience Research Workshop" )
def __magic_name__ ( self ):
lowercase : Optional[Any] = self.remote_tool(text=_a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(_a , "launched the BigScience Research Workshop" )
| 202 | 1 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
if dist[i][j] != float('inf' ):
print(int(dist[i][j] ) , end='\t' )
else:
print('INF' , end='\t' )
print()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
lowerCAmelCase__ : Tuple = [[float('inf' ) for _ in range(SCREAMING_SNAKE_CASE_ )] for _ in range(SCREAMING_SNAKE_CASE_ )]
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(SCREAMING_SNAKE_CASE_ ):
# looping through rows of graph array
for i in range(SCREAMING_SNAKE_CASE_ ):
# looping through columns of graph array
for j in range(SCREAMING_SNAKE_CASE_ ):
if (
dist[i][k] != float('inf' )
and dist[k][j] != float('inf' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
lowerCAmelCase__ : int = dist[i][k] + dist[k][j]
_print_dist(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return dist, v
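# Worked sketch (hedged: the bottom of this script calls this function
# floyd_warshall; it prints the matrix via _print_dist and returns (dist, v)):
# >>> INF = float('inf')
# >>> g = [[0, 2, INF], [INF, 0, 3], [INF, INF, 0]]
# >>> dist, _ = floyd_warshall(g, 3)  # doctest: +SKIP
# >>> dist[0][2]                      # doctest: +SKIP
# 5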
if __name__ == "__main__":
lowerCamelCase__ = int(input("""Enter number of vertices: """))
lowerCamelCase__ = int(input("""Enter number of edges: """))
lowerCamelCase__ = [[float("""inf""") for i in range(v)] for j in range(v)]
for i in range(v):
lowerCamelCase__ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("""\nEdge """, i + 1)
lowerCamelCase__ = int(input("""Enter source:"""))
lowerCamelCase__ = int(input("""Enter destination:"""))
lowerCamelCase__ = float(input("""Enter weight:"""))
lowerCamelCase__ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0 | 357 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int:
require_version(deps[pkg] , SCREAMING_SNAKE_CASE_ ) | 307 | 0 |
from itertools import product
def a( A : int , A : int ) -> list[int]:
"""simple docstring"""
a = sides_number
a = max_face_number * dice_number
a = [0] * (max_total + 1)
a = 1
a = range(A , max_face_number + 1 )
for dice_numbers in product(A , repeat=A ):
a = sum(A )
totals_frequencies[total] += 1
return totals_frequencies
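# Worked sanity check (hedged: the masked helper above is
# total_frequency_distribution, as solution() calls it): two 6-sided dice give
# the familiar triangular distribution, with six ways to roll a total of 7.
# >>> total_frequency_distribution(sides_number=6, dice_number=2)[7]  # doctest: +SKIP
# 6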
def a( ) -> float:
"""simple docstring"""
a = total_frequency_distribution(
sides_number=4 , dice_number=9 )
a = total_frequency_distribution(
sides_number=6 , dice_number=6 )
a = 0
a = 9
a = 4 * 9
a = 6
for peter_total in range(A , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
a = (4**9) * (6**6)
a = peter_wins_count / total_games_number
a = round(A , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 227 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase: Union[str, Any] = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: Dict = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: int = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
_lowercase: Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 227 | 1 |
"""simple docstring"""
from __future__ import annotations
import requests
__lowerCAmelCase : List[Any] =set(
"""approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports""".split()
)
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :str = "new" , lowerCAmelCase__ :list | None = None ) -> Tuple:
'''simple docstring'''
lowercase = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(a__ ) - valid_terms ) ):
lowercase = f'Invalid search term: {invalid_search_terms}'
raise ValueError(a__ )
lowercase = requests.get(
f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"""User-agent""": """A random string"""} , )
if response.status_code == 4_2_9:
raise requests.HTTPError
lowercase = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(a__ )}
lowercase = {}
for id_ in range(a__ ):
lowercase = {
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
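# Usage sketch (network-dependent and rate-limited, so illustrative only; the
# wanted_data keys must come from the valid_terms whitelist above):
# >>> get_subreddit_data("learnpython", limit=2, wanted_data=["title", "url"])  # doctest: +SKIP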
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 370 | """simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list[list]:
'''simple docstring'''
lowercase = current_set.copy()
for row_index, row in enumerate(lowerCAmelCase__ ):
lowercase = row[0]
for column_index, column in enumerate(lowerCAmelCase__ ):
if magnitude == 0:
lowercase = column
continue
lowercase = column / magnitude
# Subtract to cancel term
lowercase = current_set[0]
lowercase = [first_row]
lowercase = current_set[1::]
for row in current_set:
lowercase = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase__ )
continue
for column_index in range(len(lowerCAmelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase = final_set[0]
lowercase = []
lowercase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase = simplify(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase__ )
lowercase = resultant
return final_set
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list:
'''simple docstring'''
if len(lowerCAmelCase__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
lowercase = len(lowerCAmelCase__ ) + 1
if any(len(lowerCAmelCase__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(lowerCAmelCase__ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCAmelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase = equations.copy()
if any(0 in row for row in data_set ):
lowercase = data_set.copy()
lowercase = []
for row_index, row in enumerate(lowerCAmelCase__ ):
if 0 not in row:
lowercase = data_set.pop(lowerCAmelCase__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCAmelCase__ )
lowercase = data_set.copy()
lowercase = simplify(lowerCAmelCase__ )
lowercase = simplified[::-1]
lowercase = []
for row in simplified:
lowercase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase = row.copy()[: len(lowerCAmelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase__ ) == 0:
solutions.append(0 )
continue
lowercase = temp_row[1::]
lowercase = temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase__ )
lowercase = []
for item in solutions:
final.append(float(round(lowerCAmelCase__ , 5 ) ) )
return final[::-1]
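# Worked example (hedged: the driver below calls this function
# solve_simultaneous): x + 2y = 3 and 4x + 5y = 6 give x = -1, y = 2.
# >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])  # doctest: +SKIP
# [-1.0, 2.0]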
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 32 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = "roc_bert"
def __init__( self : List[str] ,_snake_case : Any=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : Union[str, Any]=12 ,_snake_case : List[Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : Optional[int]="gelu" ,_snake_case : int=0.1 ,_snake_case : Any=0.1 ,_snake_case : int=512 ,_snake_case : Optional[int]=2 ,_snake_case : List[str]=0.02 ,_snake_case : Dict=1e-12 ,_snake_case : str=True ,_snake_case : Tuple=0 ,_snake_case : List[str]="absolute" ,_snake_case : Optional[Any]=None ,_snake_case : Union[str, Any]=True ,_snake_case : Optional[Any]=True ,_snake_case : List[Any]=768 ,_snake_case : Dict=910 ,_snake_case : List[str]=512 ,_snake_case : List[str]=24_858 ,_snake_case : Tuple=True ,**_snake_case : str ,) -> int:
"""simple docstring"""
lowercase__ : Union[str, Any] = vocab_size
lowercase__ : int = max_position_embeddings
lowercase__ : Optional[Any] = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Tuple = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Optional[int] = initializer_range
lowercase__ : int = type_vocab_size
lowercase__ : int = layer_norm_eps
lowercase__ : List[Any] = use_cache
lowercase__ : List[str] = enable_pronunciation
lowercase__ : Tuple = enable_shape
lowercase__ : Optional[Any] = pronunciation_embed_dim
lowercase__ : Tuple = pronunciation_vocab_size
lowercase__ : Optional[Any] = shape_embed_dim
lowercase__ : List[Any] = shape_vocab_size
lowercase__ : int = concat_input
lowercase__ : str = position_embedding_type
lowercase__ : Dict = classifier_dropout
super().__init__(pad_token_id=_snake_case ,**_snake_case )
| 16 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
| 16 | 1 |
import os
_lowerCamelCase : List[Any] = {"I": 1, "V": 5, "X": 1_0, "L": 5_0, "C": 1_0_0, "D": 5_0_0, "M": 1_0_0_0}
def _UpperCAmelCase (UpperCamelCase_ : str ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Dict = 0
while index < len(UpperCamelCase_ ) - 1:
_lowerCAmelCase : Union[str, Any] = SYMBOLS[numerals[index]]
_lowerCAmelCase : List[str] = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def _UpperCAmelCase (UpperCamelCase_ : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Tuple = num // 1000
numerals += m_count * "M"
num %= 1000
_lowerCAmelCase : str = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_lowerCAmelCase : Optional[int] = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
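# Round-trip sketch (hedged: the Project Euler driver below names these
# helpers parse_roman_numerals and generate_roman_numerals): a non-minimal
# numeral parses to the same value as its minimal form, which is exactly the
# character saving the solution counts.
# >>> parse_roman_numerals("IIII")  # doctest: +SKIP
# 4
# >>> generate_roman_numerals(4)    # doctest: +SKIP
# 'IV'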
def _UpperCAmelCase (UpperCamelCase_ : str = "/p089_roman.txt" ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 0
with open(os.path.dirname(UpperCamelCase_ ) + roman_numerals_filename ) as filea:
_lowerCAmelCase : Tuple = filea.readlines()
for line in lines:
_lowerCAmelCase : Any = line.strip()
_lowerCAmelCase : Dict = parse_roman_numerals(UpperCamelCase_ )
_lowerCAmelCase : List[str] = generate_roman_numerals(UpperCamelCase_ )
savings += len(UpperCamelCase_ ) - len(UpperCamelCase_ )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 159 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _UpperCAmelCase (UpperCamelCase_ : Sequence[float] , UpperCamelCase_ : int , UpperCamelCase_ : int ):
'''simple docstring'''
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
_lowerCAmelCase : List[str] = (low + high) // 2
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = max_subarray(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = max_subarray(UpperCamelCase_ , mid + 1 , UpperCamelCase_ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = max_cross_sum(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def _UpperCAmelCase (UpperCamelCase_ : Sequence[float] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = float("""-inf""" ), -1
_lowerCAmelCase , _lowerCAmelCase : str = float("""-inf""" ), -1
_lowerCAmelCase : int | float = 0
for i in range(UpperCamelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
_lowerCAmelCase : Any = summ
_lowerCAmelCase : Tuple = i
_lowerCAmelCase : int = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
_lowerCAmelCase : List[Any] = summ
_lowerCAmelCase : str = i
return max_left, max_right, (left_sum + right_sum)
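# Quick check sketch (hedged: the benchmark below calls the recursive routine
# max_subarray): on the classic CLRS example the best span is indices 7..10
# with sum 43.
# >>> arr = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
# >>> max_subarray(arr, 0, len(arr) - 1)  # doctest: +SKIP
# (7, 10, 43)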
def _UpperCAmelCase (UpperCamelCase_ : int ):
'''simple docstring'''
_lowerCAmelCase : str = [randint(1 , UpperCamelCase_ ) for _ in range(UpperCamelCase_ )]
_lowerCAmelCase : str = time.time()
max_subarray(UpperCamelCase_ , 0 , input_size - 1 )
_lowerCAmelCase : Any = time.time()
return end - start
def _UpperCAmelCase ():
'''simple docstring'''
_lowerCAmelCase : Any = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
_lowerCAmelCase : Any = [time_max_subarray(UpperCamelCase_ ) for input_size in input_sizes]
print("""No of Inputs\t\tTime Taken""" )
for input_size, runtime in zip(UpperCamelCase_ , UpperCamelCase_ ):
print(UpperCamelCase_ , """\t\t""" , UpperCamelCase_ )
plt.plot(UpperCamelCase_ , UpperCamelCase_ )
plt.xlabel("""Number of Inputs""" )
plt.ylabel("""Time taken in seconds""" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 159 | 1 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__SCREAMING_SNAKE_CASE =WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : int = test_results.split(' ' )
lowercase_ : Optional[int] = 0
lowercase_ : List[Any] = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowercase_ : Any = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
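# Parsing sketch (hedged: this helper is called handle_test_results further
# down, and the masked locals are assumed to bind as used). A pytest-style
# summary such as "= 2 failed, 30 passed in 12.34s =" should yield
# (2, 30, '12.34s').
# >>> handle_test_results("= 2 failed, 30 passed in 12.34s =")  # doctest: +SKIP
# (2, 30, '12.34s')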
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase_ : List[str] = {}
lowercase_ : Dict = None
lowercase_ : str = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , lowercase_ ):
lowercase_ : Optional[int] = True
lowercase_ : Any = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
lowercase_ : str = line
lowercase_ : Optional[int] = False
return failures
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Tuple = title
lowercase_ : List[str] = doc_test_results["""time_spent"""].split(',' )[0]
lowercase_ : Optional[Any] = doc_test_results["""success"""]
lowercase_ : Optional[Any] = doc_test_results["""failures"""]
lowercase_ : str = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowercase_ : Union[str, Any] = doc_test_results
@property
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Tuple = [self._time_spent]
lowercase_ : Optional[Any] = 0
for time in time_spent:
lowercase_ : Optional[Any] = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(A__ ) == 1:
lowercase_ : str = [0, 0, time_parts[0]]
lowercase_ : Union[str, Any] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
lowercase_ : str = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f'''{int(A__ )}h{int(A__ )}m{int(A__ )}s'''
@property
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = 40
lowercase_ : Optional[int] = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(A__ ,A__ )}
lowercase_ : Dict = """"""
for category, failures in category_failures.items():
if len(A__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(A__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : List[str] = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(A__ )
@staticmethod
def _UpperCAmelCase ( ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(A__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,text='There was an issue running the tests.' ,blocks=A__ ,)
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
lowercase_ : Tuple = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
lowercase_ : Optional[Any] = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,blocks=self.payload ,text=A__ ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = """"""
for key, value in failures.items():
lowercase_ : Union[str, Any] = value[:200] + """ [Truncated]""" if len(A__ ) > 250 else value
failures_text += f'''*{key}*\n_{value}_\n\n'''
lowercase_ : Dict = job_name
lowercase_ : List[Any] = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
lowercase_ : str = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
lowercase_ : Tuple = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
lowercase_ : List[Any] = sorted(self.doc_test_results.items() ,key=lambda __UpperCamelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
lowercase_ : Union[str, Any] = f'''*Num failures* :{len(job_result["failed"] )} \n'''
lowercase_ : Optional[int] = job_result["""failures"""]
lowercase_ : List[str] = self.get_reply_blocks(A__ ,A__ ,A__ ,text=A__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,text=f'''Results for {job}''' ,blocks=A__ ,thread_ts=self.thread_ts['ts'] ,)
time.sleep(1 )
def lowercase__( ):
lowercase_ : List[Any] = os.environ["""GITHUB_RUN_ID"""]
lowercase_ : Any = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
lowercase_ : Dict = requests.get(lowercase_ ).json()
lowercase_ : Any = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
lowercase_ : Tuple = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(lowercase_ ):
lowercase_ : Optional[int] = requests.get(url + F'''&page={i + 2}''' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , lowercase_ )
return {}
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : List[Any] = {}
if os.path.exists(lowercase_ ):
lowercase_ : List[str] = os.listdir(lowercase_ )
for file in files:
try:
with open(os.path.join(lowercase_ , lowercase_ ) , encoding='utf-8' ) as f:
lowercase_ : List[str] = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'''Could not open {os.path.join(lowercase_ , lowercase_ )}.''' ) from e
return _artifact
def lowercase__( ):
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : str = name
lowercase_ : str = []
def __str__( self ) -> Union[str, Any]:
'''simple docstring'''
return self.name
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
self.paths.append({'name': self.name, 'path': path} )
lowercase_ : Dict[str, Artifact] = {}
lowercase_ : Any = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowercase_ : List[Any] = directory
if artifact_name not in _available_artifacts:
lowercase_ : Dict = Artifact(lowercase_ )
_available_artifacts[artifact_name].add_path(lowercase_ )
return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
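
# Worked example (editor's addition) of the FAILED-line parsing above, assuming pytest's
# short summary format: "FAILED src/transformers/foo.py::FooDocTest" splits on "::" into
# file_path "src/transformers/foo.py" and test "FooDocTest"; fnmatch against "*.py" then
# files the failure under the "API Examples" category.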
| 213 |
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
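
# Hedged usage sketch (editor's addition; the script name and paths are hypothetical):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /tmp/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /tmp/gpt2-pytorch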
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 192 | 0 |
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
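
# Reusable sketch of the same scrape (editor's addition; the function name is hypothetical
# and it assumes the target page exposes an `og:image` meta tag):
def _example_get_og_image_url(page_url: str) -> str:
    soup = BeautifulSoup(requests.get(page_url).content, "html.parser")
    return soup.find("meta", {"property": "og:image"})["content"]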
| 200 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
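
# Hedged usage sketch (editor's addition; the script name and paths are hypothetical):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/bigbird/model.ckpt \
#       --big_bird_config_file /tmp/bigbird/config.json \
#       --pytorch_dump_path /tmp/bigbird-pytorch \
#       --is_trivia_qa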
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 200 | 1 |
"""simple docstring"""
def excel_column_title_to_number(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
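
# Worked example (editor's addition): "AB" -> (1 * 26**1) + (2 * 26**0) = 28,
# since ord("A") - 64 == 1 and ord("B") - 64 == 2.
def _example_column_titles() -> list:
    # "A" -> 1, "Z" -> 26, "AA" -> 27, "AZ" -> 52
    return [excel_column_title_to_number(title) for title in ("A", "Z", "AA", "AZ")]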
if __name__ == "__main__":
from doctest import testmod
testmod()
| 290 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad (or truncate) each sequence to `sequence_length`; entries may be scalars or pairs."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()
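
# Minimal sketch (editor's addition) of what `padding_tensor` returns:
#   padding_tensor([[1, 2]], -1, "right", 4)            -> [[1, 2, -1, -1]]
#   padding_tensor([[(1, 2)]], (-1, -1), "right", 2)    -> [[[1, 2], [-1, -1]]]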
def is_punctuation(char):
    cp = ord(char)
    # Treat all ASCII non-letter/non-digit characters as punctuation, even the ones
    # (like "^" or "$") that Unicode does not classify as punctuation.
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Data collator that pads entity-level inputs (labels, ner_tags, original_entity_spans)."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
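
# Hedged usage sketch (editor's addition; feature values are hypothetical). Each feature
# dict carries the tokenizer outputs plus "labels", "ner_tags" and "original_entity_spans";
# the collator pads all of them to the batch's longest entity sequence:
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#   batch = collator([feature_0, feature_1])  # every value becomes a torch.int64 tensor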
| 104 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
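
# Worked example (editor's addition): a fairseq dict file containing the two lines
# "hello 42" and "world 7" yields
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.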
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak the fairseq model's weights to the transformers design."""
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 312 | """simple docstring"""
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player in a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
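
# Worked example (editor's addition): with scores [90, 23, 6, 33, 21, 65, 123, 34423]
# and height log2(8) = 3, the maximizer's optimal value is 65:
#   max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423)))
#     = max(min(90, 33), min(65, 34423)) = max(33, 65) = 65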
| 312 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure padding is actually exercised
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main() | 210 | import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Build a one-measurement circuit and return the histogram of outcomes."""
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
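
# Expected behaviour (editor's addition): with no gates applied, the qubit stays in |0>,
# so single_qubit_measure(1, 1) returns counts of {"0": 1000}.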
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 210 | 1 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.1_5},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
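
# Worked example (editor's addition):
#   rewrite_dict_keys({"<s>": 0, "le@@": 4, "er": 5})
#   -> {"<s>": 0, "le": 4, "er</w>": 5}
# BPE continuation markers "@@" are stripped, word-final pieces get "</w>", and the four
# special tokens keep their original form.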
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models

    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 330 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCamelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[str] ,__lowerCamelCase : int ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : Optional[str] = "relu" ,**__lowerCamelCase : str ,):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
a = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
a = tf.keras.layers.ConvaD(
filters=__lowerCamelCase ,kernel_size=__lowerCamelCase ,strides=__lowerCamelCase ,padding='''VALID''' ,groups=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ,)
a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' )
a = ACTaFN[activation] if activation is not None else tf.identity
def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : List[str] ):
'''simple docstring'''
a = self.convolution(self.padding(__lowerCamelCase ) )
a = self.normalization(__lowerCamelCase )
a = self.activation(__lowerCamelCase )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Any ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : List[Any] ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = config.num_channels
a = TFRegNetConvLayer(
out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='''embedder''' ,)
def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
a = shape_list(__lowerCamelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
a = tf.transpose(__lowerCamelCase ,perm=(0, 2, 3, 1) )
a = self.embedder(__lowerCamelCase )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Tuple ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = tf.keras.layers.ConvaD(
filters=__lowerCamelCase ,kernel_size=1 ,strides=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' )
a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' )
def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ):
'''simple docstring'''
return self.normalization(self.convolution(__lowerCamelCase ) ,training=__lowerCamelCase )
class lowerCamelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] ,__lowerCamelCase : int ,__lowerCamelCase : int ,**__lowerCamelCase : str ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' )
a = [
tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''relu''' ,name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''sigmoid''' ,name='''attention.2''' ),
]
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
a = self.pooler(__lowerCamelCase )
for layer_module in self.attention:
a = layer_module(__lowerCamelCase )
a = hidden_state * pooled
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : Dict ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = in_channels != out_channels or stride != 1
a = max(1 ,out_channels // config.groups_width )
a = (
TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
a = [
TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ),
TFRegNetConvLayer(
__lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ),
TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.2''' ),
]
a = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
a = hidden_state
for layer_module in self.layers:
a = layer_module(__lowerCamelCase )
a = self.shortcut(__lowerCamelCase )
hidden_state += residual
a = self.activation(__lowerCamelCase )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Dict ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : List[str] ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = in_channels != out_channels or stride != 1
a = max(1 ,out_channels // config.groups_width )
a = (
TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' )
)
a = [
TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ),
TFRegNetConvLayer(
__lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ),
TFRegNetSELayer(__lowerCamelCase ,reduced_channels=int(round(in_channels / 4 ) ) ,name='''layer.2''' ),
TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.3''' ),
]
a = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : str ):
'''simple docstring'''
a = hidden_state
for layer_module in self.layers:
a = layer_module(__lowerCamelCase )
a = self.shortcut(__lowerCamelCase )
hidden_state += residual
a = self.activation(__lowerCamelCase )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
a = [
# downsampling is done in the first layer with stride of 2
layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,stride=__lowerCamelCase ,name='''layers.0''' ),
*[layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : int ):
'''simple docstring'''
for layer_module in self.layers:
a = layer_module(__lowerCamelCase )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__lowerCamelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='''stages.0''' ,) )
a = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__lowerCamelCase ,config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,depth=__lowerCamelCase ,name=F"""stages.{i+1}""" ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ,__lowerCamelCase : bool = True ):
'''simple docstring'''
a = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a = hidden_states + (hidden_state,)
a = stage_module(__lowerCamelCase )
if output_hidden_states:
a = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowerCamelCase ,hidden_states=__lowerCamelCase )
@keras_serializable
class lowerCamelCase_ ( tf.keras.layers.Layer ):
SCREAMING_SNAKE_CASE_ = RegNetConfig
def __init__( self : Dict ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = config
a = TFRegNetEmbeddings(__lowerCamelCase ,name='''embedder''' )
a = TFRegNetEncoder(__lowerCamelCase ,name='''encoder''' )
a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' )
@unpack_inputs
def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : bool = False ,):
'''simple docstring'''
a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a = return_dict if return_dict is not None else self.config.use_return_dict
a = self.embedder(__lowerCamelCase ,training=__lowerCamelCase )
a = self.encoder(
__lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase )
a = encoder_outputs[0]
a = self.pooler(__lowerCamelCase )
# Change to NCHW output format have uniformity in the modules
a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) )
a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
a = tuple([tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCamelCase ,pooler_output=__lowerCamelCase ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = RegNetConfig
SCREAMING_SNAKE_CASE_ = 'regnet'
SCREAMING_SNAKE_CASE_ = 'pixel_values'
@property
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )}
UpperCamelCase__ : Union[str, Any] = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCamelCase__ : List[str] = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , a_ , )
class lowerCamelCase_ ( a_ ):
def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : int ,**__lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase )
a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : List[str]=False ,):
'''simple docstring'''
a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a = return_dict if return_dict is not None else self.config.use_return_dict
a = self.regnet(
pixel_values=__lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ,)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , a_ , )
class lowerCamelCase_ ( a_ , a_ ):
def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : str ,**__lowerCamelCase : Any ):
'''simple docstring'''
super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase )
a = config.num_labels
a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' )
# classification head
a = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels ,name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : Dict=False ,):
'''simple docstring'''
a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a = return_dict if return_dict is not None else self.config.use_return_dict
a = self.regnet(
__lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase )
a = outputs.pooler_output if return_dict else outputs[1]
a = self.classifier[0](__lowerCamelCase )
a = self.classifier[1](__lowerCamelCase )
a = None if labels is None else self.hf_compute_loss(labels=__lowerCamelCase ,logits=__lowerCamelCase )
if not return_dict:
a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__lowerCamelCase ,logits=__lowerCamelCase ,hidden_states=outputs.hidden_states )
| 330 | 1 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )

    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
        # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> list:
    """Concatenate partial results into one file, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x['id'])
    preds = [x['pred'] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout):
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('waiting for all nodes to finish')
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('rank_*.json'))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('Rank 0 gave up on waiting for other processes')
# Unreachable
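

# Minimal sketch of the synchronization pattern used above (added example with
# made-up file contents; it depends on this module's `load_json`/`lmap` utils,
# so it is left commented out): every rank dumps its shard to rank_<i>.json and
# rank 0 polls the directory until all shards are present and parseable.
#
#     from pathlib import Path
#     import json
#
#     save_dir = Path("preds_tmp")
#     save_dir.mkdir(exist_ok=True)
#     for rank in range(2):  # pretend two processes already finished
#         (save_dir / f"rank_{rank}.json").write_text(
#             json.dumps([{"id": rank, "pred": f"prediction {rank}"}])
#         )
#     partial = gather_results_from_each_node(2, save_dir, timeout=10)
#     print(combine_partial_results(partial))  # -> ['prediction 0', 'prediction 1']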
if __name__ == "__main__":
# Usage for MT:
run_generate() | 96 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
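
    # Illustrative input format (added note): CoNLL-style files are one token
    # per line with space-separated columns and blank lines between sentences:
    #
    #     U.N. NNP I-NP I-ORG
    #     official NN I-NP O
    #
    # With the default label_idx=-1 the reader above yields
    # InputExample(words=["U.N.", "official"], labels=["I-ORG", "O"]).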
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 202 | 0 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
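

# If this module serves as a torch.hub entry point (hubconf-style, which the
# `dependencies` list above suggests), usage would look like the following
# sketch; it needs network access, so it is left commented out:
#
#     import torch
#
#     tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")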
| 352 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
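

# For illustration (hypothetical numbers): a 1200-character source file that
# tokenizes into 400 ids yields {"input_ids": [...], "ratio_char_token": 3.0};
# a higher ratio means the tokenizer compresses the text into fewer tokens.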
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 261 | 0 |
from ..utils import DummyObject, requires_backends
# class names below are assumed; the obfuscated source preserved only the structure
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 13 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
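
# Note on the pattern above (added comment): registering a `_LazyModule` in
# `sys.modules` defers importing the heavy torch-backed submodules until an
# attribute is first accessed. A minimal sketch of the idea, with hypothetical
# helper names:
#
#     import importlib
#     import types
#
#     class LazyModule(types.ModuleType):
#         def __getattr__(self, name):
#             # `_module_for` maps an exported name to its submodule (assumption)
#             submodule = importlib.import_module("." + self._module_for(name), self.__name__)
#             return getattr(submodule, name)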
| 13 | 1 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
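

# Quick numeric check for the helpers above (illustrative, not part of the
# original file): subtracting the row-wise max before exponentiating keeps
# `softmax` numerically stable even for very large logits.
#
#     >>> softmax(np.array([[1000.0, 1001.0]]))  # no overflow thanks to the shift
#     array([[0.26894142, 0.73105858]])
#     >>> float(sigmoid(np.array(0.0)))
#     0.5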
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"\n        return_all_scores (`bool`, *optional*, defaults to `False`):\n            Whether to return all prediction scores or just the one of the predicted class.\n        function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n            - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n            has several labels, will apply the softmax function on the output.\n            - `\"sigmoid\"`: Applies the sigmoid function on the output.\n            - `\"softmax\"`: Applies the softmax function on the output.\n            - `\"none\"`: Does not apply any function on the output.\n    " , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
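

# Illustrative usage of the pipeline above (added example; the model name is a
# common public checkpoint and downloading it needs network access, so the
# snippet is left commented out):
#
#     from transformers import pipeline
#
#     classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     print(classifier("This is great!"))              # [{'label': 'POSITIVE', 'score': ...}]
#     print(classifier("This is great!", top_k=None))  # scores for every class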
| 1 |
"""simple docstring"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """
    Returns the 1-indexed line of base_exp.txt whose pair base,exponent has the
    greatest value of base**exponent, compared via exponent * log10(base).
    """
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
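

# Worked example (added note): Project Euler's statement observes that
# 632382**518061 is greater than 519432**525806 even though its exponent is
# smaller. Comparing x * log10(a) decides this without materializing the
# multi-million-digit integers: 518061 * log10(632382) is (slightly) larger
# than 525806 * log10(519432).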
if __name__ == "__main__":
print(solution())
| 1 | 1 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous

class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value

class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
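

# Illustrative usage of the classes above (added example; it is exercised by
# the doctest runner in the main guard below).
def _usage_example() -> None:
    """
    >>> linked_list = LinkedList()
    >>> linked_list.is_empty()
    True
    >>> linked_list.insert(10)
    >>> linked_list.insert(20)
    >>> linked_list.insert(30)
    >>> str(linked_list)
    '10 20 30'
    >>> 20 in linked_list
    True
    >>> linked_list.delete_value(20)
    >>> str(linked_list)
    '10 30'
    >>> linked_list.get_head_data()
    10
    >>> linked_list.get_tail_data()
    30
    """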
def A ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_lowerCAmelCase : List[str] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class _UpperCamelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self :Dict , lowerCamelCase :str , lowerCamelCase :bool , lowerCamelCase :str = None , lowerCamelCase :list = None ) -> Tuple:
UpperCAmelCase__ = None
UpperCAmelCase__ = os.path.abspath(os.path.join("examples" , "by_feature" ) )
UpperCAmelCase__ = os.path.abspath("examples" )
for item in os.listdir(lowerCamelCase ):
if item not in EXCLUDE_EXAMPLES:
UpperCAmelCase__ = os.path.join(lowerCamelCase , lowerCamelCase )
if os.path.isfile(lowerCamelCase ) and ".py" in item_path:
with self.subTest(
tested_script=lowerCamelCase , feature_script=lowerCamelCase , tested_section="main()" if parser_only else "training_function()" , ):
UpperCAmelCase__ = compare_against_test(
os.path.join(lowerCamelCase , lowerCamelCase ) , lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = "\n".join(lowerCamelCase )
if special_strings is not None:
for string in special_strings:
UpperCAmelCase__ = diff.replace(lowerCamelCase , "" )
self.assertEqual(lowerCamelCase , "" )
def UpperCAmelCase_ ( self :List[str] ) -> Any:
self.one_complete_example("complete_nlp_example.py" , lowerCamelCase )
self.one_complete_example("complete_nlp_example.py" , lowerCamelCase )
def UpperCAmelCase_ ( self :str ) -> int:
UpperCAmelCase__ = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
UpperCAmelCase__ = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.one_complete_example("complete_cv_example.py" , lowerCamelCase , lowerCamelCase , lowerCamelCase )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = False
@classmethod
def UpperCAmelCase_ ( cls :List[Any] ) -> Any:
super().setUpClass()
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
UpperCAmelCase__ = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def UpperCAmelCase_ ( cls :Union[str, Any] ) -> Optional[int]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCAmelCase_ ( self :Dict ) -> Dict:
UpperCAmelCase__ = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def UpperCAmelCase_ ( self :Optional[int] ) -> Any:
UpperCAmelCase__ = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
UpperCAmelCase__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def UpperCAmelCase_ ( self :Tuple ) -> Dict:
UpperCAmelCase__ = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
UpperCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase )
self.assertNotIn("epoch 0:" , lowerCamelCase )
self.assertIn("epoch 1:" , lowerCamelCase )
def UpperCAmelCase_ ( self :Dict ) -> int:
UpperCAmelCase__ = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
UpperCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase )
if torch.cuda.is_available():
UpperCAmelCase__ = torch.cuda.device_count()
else:
UpperCAmelCase__ = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , lowerCamelCase )
self.assertIn("epoch 1:" , lowerCamelCase )
else:
self.assertIn("epoch 0:" , lowerCamelCase )
self.assertIn("epoch 1:" , lowerCamelCase )
@slow
def UpperCAmelCase_ ( self :Dict ) -> Optional[int]:
UpperCAmelCase__ = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
UpperCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase )
UpperCAmelCase__ = re.findall("({.+})" , lowerCamelCase )
UpperCAmelCase__ = [r for r in results if "accuracy" in r][-1]
UpperCAmelCase__ = ast.literal_eval(lowerCamelCase )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def UpperCAmelCase_ ( self :int ) -> Optional[int]:
UpperCAmelCase__ = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase_ ( self :List[Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
UpperCAmelCase__ = f'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase , "tracking" ) ) )
def UpperCAmelCase_ ( self :Any ) -> Dict:
UpperCAmelCase__ = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def UpperCAmelCase_ ( self :Any ) -> Optional[int]:
UpperCAmelCase__ = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
| 169 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
                 eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
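
    # Illustrative usage (added example; downloading the checkpoint requires
    # network access, so it is left commented out):
    #
    #     from transformers import BlenderbotTokenizerFast
    #
    #     tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    #     ids = tok(" Hello, how are you?").input_ids
    #     tok.decode(ids)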
| 359 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 244 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
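
    # Illustrative check (added example): with the defaults above (input_size=1,
    # no static/dynamic/time features, cardinality [0] hence embedding_dimension
    # [0]) only the two scale features log1p(abs(loc)) and log(scale) remain:
    #
    #     config = TimeSeriesTransformerConfig(prediction_length=24)
    #     assert config._number_of_features == 2
    #     assert config.feature_size == 1 * len(config.lags_sequence) + 2  # = 9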
| 161 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'shortest_edge': 30}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize_and_center_crop
_UpperCAmelCase = size
_UpperCAmelCase = crop_pct
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 329 | 0 |
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
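

# Sanity check (added note): Python's built-in oct() produces the same
# "0o"-prefixed form, e.g. decimal_to_octal(65) == oct(65) == "0o101".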
def main() -> None:
print("\n2 in octal is:" )
print(decimal_to_octal(2 ) ) # = 2
print("\n8 in octal is:" )
print(decimal_to_octal(8 ) ) # = 10
print("\n65 in octal is:" )
print(decimal_to_octal(65 ) ) # = 101
print("\n216 in octal is:" )
print(decimal_to_octal(2_16 ) ) # = 330
print("\n512 in octal is:" )
print(decimal_to_octal(5_12 ) ) # = 1000
print("\n" )
if __name__ == "__main__":
main()
| 253 |
"""simple docstring"""
def wave(txt: str) -> list:
    """
    Returns a "wave" of the input string: one copy per alphabetic character,
    with that character uppercased.

    >>> wave("cat")
    ['Cat', 'cAt', 'caT']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 253 | 1 |
'''simple docstring'''
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
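

# Expected output (added note): the circuit prepares a GHZ state, so every shot
# collapses to all zeros or all ones; with shots=1000 and 3 qubits the counts
# come out roughly as {"000": ~500, "111": ~500}.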
if __name__ == "__main__":
print(f'''Total count for various states are: {quantum_entanglement(3)}''')
| 151 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 151 | 1 |
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
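

# Illustrative end-to-end run (added example; picked up by the doctest runner
# in the main guard below). The data is the classic healthy/fever HMM.
def _illustrative_example() -> None:
    """
    >>> observations = ["normal", "cold", "dizzy"]
    >>> states = ["Healthy", "Fever"]
    >>> start_p = {"Healthy": 0.6, "Fever": 0.4}
    >>> trans_p = {
    ...     "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    ...     "Fever": {"Healthy": 0.4, "Fever": 0.6},
    ... }
    >>> emit_p = {
    ...     "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    ...     "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    ... }
    >>> viterbi(observations, states, start_p, trans_p, emit_p)
    ['Healthy', 'Healthy', 'Fever']
    """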
if __name__ == "__main__":
from doctest import testmod
testmod() | 31 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image) | 31 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def lowerCamelCase__ ( a__ : Optional[int] , a__ : Union[str, Any] , a__ : List[Any] ) -> List[str]:
UpperCamelCase_ = dct.pop(a__ )
UpperCamelCase_ = val
def lowerCamelCase__ ( a__ : Optional[int] , a__ : Dict ) -> Tuple:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCamelCase_ = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCamelCase_ = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCamelCase_ = torch.cat((q_bias, torch.zeros_like(a__ , requires_grad=a__ ), v_bias) )
UpperCamelCase_ = qkv_bias
def lowerCamelCase__ ( a__ : Optional[Any] ) -> str:
UpperCamelCase_ = 364 if """coco""" in model_name else 224
UpperCamelCase_ = InstructBlipVisionConfig(image_size=a__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
UpperCamelCase_ = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCamelCase_ = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
UpperCamelCase_ = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=3_2001 ).to_dict()
elif "vicuna-13b" in model_name:
UpperCamelCase_ = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=3_2001 ).to_dict()
else:
raise ValueError("""Model name not supported""" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
UpperCamelCase_ = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
UpperCamelCase_ = InstructBlipConfig(vision_config=a__ , text_config=a__ , qformer_config=a__ )
return config, image_size
@torch.no_grad()
def lowerCamelCase__ ( a__ : Any , a__ : Dict=None , a__ : List[Any]=False ) -> int:
UpperCamelCase_ = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
UpperCamelCase_ = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
UpperCamelCase_ = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
UpperCamelCase_ , UpperCamelCase_ = get_blipa_config(a__ )
UpperCamelCase_ = InstructBlipForConditionalGeneration(a__ ).eval()
UpperCamelCase_ = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
UpperCamelCase_ , UpperCamelCase_ = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
UpperCamelCase_ = """cuda:1""" if torch.cuda.is_available() else """cpu"""
UpperCamelCase_ = """cuda:2""" if torch.cuda.is_available() else """cpu"""
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = load_model_and_preprocess(
name=a__ , model_type=a__ , is_eval=a__ , device=a__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
UpperCamelCase_ = original_model.state_dict()
UpperCamelCase_ = create_rename_keys(a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCamelCase_ = state_dict.pop(a__ )
if key.startswith("""Qformer.bert""" ):
UpperCamelCase_ = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
UpperCamelCase_ = key.replace("""self""" , """attention""" )
if "llm_proj" in key:
UpperCamelCase_ = key.replace("""llm_proj""" , """language_projection""" )
if "t5_proj" in key:
UpperCamelCase_ = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""llm_model""" ):
UpperCamelCase_ = key.replace("""llm_model""" , """language_model""" )
if key.startswith("""t5""" ):
UpperCamelCase_ = key.replace("""t5""" , """language""" )
UpperCamelCase_ = val
# read in qv biases
read_in_q_v_bias(a__ , a__ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(a__ , strict=a__ )
UpperCamelCase_ = load_demo_image()
UpperCamelCase_ = """What is unusual about this image?"""
# create processor
UpperCamelCase_ = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=a__ , image_std=a__ )
UpperCamelCase_ = InstructBlipProcessor(
image_processor=a__ , tokenizer=a__ , qformer_tokenizer=a__ , )
UpperCamelCase_ = processor(images=a__ , text=a__ , return_tensors="""pt""" ).to(a__ )
# make sure processor creates exact same pixel values
UpperCamelCase_ = vis_processors["""eval"""](a__ ).unsqueeze(0 ).to(a__ )
UpperCamelCase_ = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , a__ )
original_model.to(a__ )
hf_model.to(a__ )
with torch.no_grad():
if "vicuna" in model_name:
UpperCamelCase_ = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
UpperCamelCase_ = hf_model(**a__ ).logits
else:
UpperCamelCase_ = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
UpperCamelCase_ = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(a__ )
UpperCamelCase_ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
UpperCamelCase_ = hf_model(**a__ , labels=a__ ).logits
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
UpperCamelCase_ = 1e-4 if """vicuna""" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , a__ , atol=a__ )
print("""Looks ok!""" )
print("""Generating with original model...""" )
UpperCamelCase_ = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
UpperCamelCase_ = hf_model.generate(
**a__ , do_sample=a__ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
UpperCamelCase_ = 2
print("""Original generation:""" , a__ )
UpperCamelCase_ = processor.batch_decode(a__ , skip_special_tokens=a__ )
UpperCamelCase_ = [text.strip() for text in output_text]
print("""HF generation:""" , a__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(a__ )
hf_model.save_pretrained(a__ )
if push_to_hub:
processor.push_to_hub(f'''Salesforce/{model_name}''' )
hf_model.push_to_hub(f'''Salesforce/{model_name}''' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
_A = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
_A = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 122 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=3_0 , __UpperCamelCase=4_0_0 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , __UpperCamelCase=1 / 2_5_5 , __UpperCamelCase=True , ):
"""simple docstring"""
UpperCamelCase_ = size if size is not None else {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3}
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = min_resolution
UpperCamelCase_ = max_resolution
UpperCamelCase_ = do_resize
UpperCamelCase_ = size
UpperCamelCase_ = do_normalize
UpperCamelCase_ = image_mean
UpperCamelCase_ = image_std
UpperCamelCase_ = do_rescale
UpperCamelCase_ = rescale_factor
UpperCamelCase_ = do_pad
def lowerCamelCase_ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase=False ):
"""simple docstring"""
if not batched:
UpperCamelCase_ = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
UpperCamelCase_ , UpperCamelCase_ = image.size
else:
UpperCamelCase_ , UpperCamelCase_ = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase_ = int(self.size["""shortest_edge"""] * h / w )
UpperCamelCase_ = self.size["""shortest_edge"""]
elif w > h:
UpperCamelCase_ = self.size["""shortest_edge"""]
UpperCamelCase_ = int(self.size["""shortest_edge"""] * w / h )
else:
UpperCamelCase_ = self.size["""shortest_edge"""]
UpperCamelCase_ = self.size["""shortest_edge"""]
else:
UpperCamelCase_ = []
for image in image_inputs:
UpperCamelCase_ , UpperCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase_ = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
UpperCamelCase_ = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : str = YolosImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = YolosImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
UpperCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2, """longest_edge""": 8_4} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
UpperCamelCase_ = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_ = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_ = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase_ = self.image_processing_class(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase , do_rescale=__UpperCamelCase )
# create random PyTorch tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
UpperCamelCase_ = image_processing_a.pad(__UpperCamelCase , return_tensors="""pt""" )
UpperCamelCase_ = image_processing_a(__UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
UpperCamelCase_ = json.loads(f.read() )
UpperCamelCase_ = {"""image_id""": 3_9_7_6_9, """annotations""": target}
# encode them
UpperCamelCase_ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
UpperCamelCase_ = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
UpperCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1e-4 ) )
# verify area
UpperCamelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
UpperCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
UpperCamelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1e-3 ) )
# verify image_id
UpperCamelCase_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
UpperCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
UpperCamelCase_ = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
UpperCamelCase_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
UpperCamelCase_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
UpperCamelCase_ = json.loads(f.read() )
UpperCamelCase_ = {"""file_name""": """000000039769.png""", """image_id""": 3_9_7_6_9, """segments_info""": target}
UpperCamelCase_ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
UpperCamelCase_ = YolosImageProcessor(format="""coco_panoptic""" )
UpperCamelCase_ = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
UpperCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1e-4 ) )
# verify area
UpperCamelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
UpperCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
UpperCamelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1e-3 ) )
# verify image_id
UpperCamelCase_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
UpperCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
UpperCamelCase_ = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
UpperCamelCase_ = 8_2_2_8_7_3
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
UpperCamelCase_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
UpperCamelCase_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
| 122 | 1 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class a :
__lowerCAmelCase : List[str]
__lowerCAmelCase : Optional[str] = None
# Automatically constructed
__lowerCAmelCase : ClassVar[str] = "dict"
__lowerCAmelCase : ClassVar[Any] = None
__lowerCAmelCase : str = field(default="""Translation""" , init=__lowerCamelCase , repr=__lowerCamelCase )
def __call__( self :Optional[Any] ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __lowerCamelCase ( self :Dict ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class a :
__lowerCAmelCase : Optional[List] = None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[str] = None
# Automatically constructed
__lowerCAmelCase : ClassVar[str] = "dict"
__lowerCAmelCase : ClassVar[Any] = None
__lowerCAmelCase : str = field(default="""TranslationVariableLanguages""" , init=__lowerCamelCase , repr=__lowerCamelCase )
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Optional[Any] = sorted(set(self.languages ) ) if self.languages else None
snake_case__ : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self :List[Any] ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :Optional[int] ):
snake_case__ : str = set(self.languages )
if self.languages and set(__lowercase ) - lang_set:
raise ValueError(
F"""Some languages in example ({', '.join(sorted(set(__lowercase ) - lang_set ) )}) are not in valid set ({', '.join(__lowercase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
snake_case__ : Union[str, Any] = []
for lang, text in translation_dict.items():
if isinstance(__lowercase ,__lowercase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
snake_case__ , snake_case__ : int = zip(*sorted(__lowercase ) )
return {"language": languages, "translation": translations}
def __lowerCamelCase ( self :Union[str, Any] ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 44 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {'''vocab_file''': '''vocab.txt'''}
A__ = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
A__ = {
'''openbmb/cpm-ant-10b''': 1024,
}
def _lowerCAmelCase ( __lowerCAmelCase ) -> str:
"""simple docstring"""
snake_case__ : str = collections.OrderedDict()
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as reader:
snake_case__ : List[Any] = reader.readlines()
for index, token in enumerate(__lowerCAmelCase ):
snake_case__ : str = token.rstrip('''\n''' )
snake_case__ : int = index
return vocab
class a ( __lowerCamelCase ):
def __init__( self :str ,__lowercase :str ,__lowercase :int="<unk>" ,__lowercase :Tuple=2_0_0 ):
snake_case__ : Union[str, Any] = vocab
snake_case__ : str = unk_token
snake_case__ : Dict = max_input_chars_per_word
def __lowerCamelCase ( self :Tuple ,__lowercase :Dict ):
snake_case__ : Optional[Any] = list(__lowercase )
if len(__lowercase ) > self.max_input_chars_per_word:
return [self.unk_token]
snake_case__ : List[Any] = 0
snake_case__ : List[str] = []
while start < len(__lowercase ):
snake_case__ : Any = len(__lowercase )
snake_case__ : Any = None
while start < end:
snake_case__ : Tuple = ''''''.join(chars[start:end] )
if substr in self.vocab:
snake_case__ : Union[str, Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__lowercase )
snake_case__ : Union[str, Any] = end
return sub_tokens
class a ( __lowerCamelCase ):
__lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Tuple = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : Optional[Any] = False
def __init__( self :str ,__lowercase :Optional[Any] ,__lowercase :Dict="<d>" ,__lowercase :List[Any]="</d>" ,__lowercase :Union[str, Any]="<s>" ,__lowercase :List[str]="</s>" ,__lowercase :str="<pad>" ,__lowercase :Tuple="<unk>" ,__lowercase :Tuple="</n>" ,__lowercase :List[Any]="</_>" ,__lowercase :str="left" ,**__lowercase :Optional[Any] ,):
requires_backends(self ,['''jieba'''] )
super().__init__(
bod_token=__lowercase ,eod_token=__lowercase ,bos_token=__lowercase ,eos_token=__lowercase ,pad_token=__lowercase ,unk_token=__lowercase ,line_token=__lowercase ,space_token=__lowercase ,padding_side=__lowercase ,**__lowercase ,)
snake_case__ : List[str] = bod_token
snake_case__ : List[Any] = eod_token
snake_case__ : List[Any] = load_vocab(__lowercase )
snake_case__ : Any = self.encoder[space_token]
snake_case__ : Dict = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
snake_case__ : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda __lowercase : x[1] ) )
snake_case__ : Any = {v: k for k, v in self.encoder.items()}
snake_case__ : Any = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token )
@property
def __lowerCamelCase ( self :Optional[int] ):
return self.encoder[self.bod_token]
@property
def __lowerCamelCase ( self :Union[str, Any] ):
return self.encoder[self.eod_token]
@property
def __lowerCamelCase ( self :List[str] ):
return self.encoder["\n"]
@property
def __lowerCamelCase ( self :Tuple ):
return len(self.encoder )
def __lowerCamelCase ( self :Any ):
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self :str ,__lowercase :Dict ):
snake_case__ : Tuple = []
for x in jieba.cut(__lowercase ,cut_all=__lowercase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__lowercase ) )
return output_tokens
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :Optional[Any] ,**__lowercase :Union[str, Any] ):
snake_case__ : Dict = [i for i in token_ids if i >= 0]
snake_case__ : Optional[int] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__lowercase ,**__lowercase )
def __lowerCamelCase ( self :int ,__lowercase :List[str] ):
return token in self.encoder
def __lowerCamelCase ( self :int ,__lowercase :List[str] ):
return "".join(__lowercase )
def __lowerCamelCase ( self :Optional[int] ,__lowercase :Optional[int] ):
return self.encoder.get(__lowercase ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self :Tuple ,__lowercase :int ):
return self.decoder.get(__lowercase ,self.unk_token )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :str ,__lowercase :Optional[str] = None ):
if os.path.isdir(__lowercase ):
snake_case__ : int = os.path.join(
__lowercase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
snake_case__ : str = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
snake_case__ : List[str] = 0
if " " in self.encoder:
snake_case__ : Dict = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
snake_case__ : Union[str, Any] = self.encoder['''\n''']
del self.encoder["\n"]
snake_case__ : Dict = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda __lowercase : x[1] ) )
with open(__lowercase ,'''w''' ,encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
snake_case__ : str = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __lowerCamelCase ( self :Tuple ,__lowercase :List[int] ,__lowercase :List[int] = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __lowerCamelCase ( self :int ,__lowercase :List[int] ,__lowercase :Optional[List[int]] = None ,__lowercase :bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase ,token_ids_a=__lowercase ,already_has_special_tokens=__lowercase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase ))
return [1] + ([0] * len(__lowercase ))
| 44 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {"vocab_file": "spiece.model"}
lowerCAmelCase : Union[str, Any] = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
lowerCAmelCase : List[Any] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
lowerCAmelCase : str = 0
lowerCAmelCase : str = 1
lowerCAmelCase : List[str] = 2
lowerCAmelCase : Optional[Any] = 3
lowerCAmelCase : List[Any] = 4
class __magic_name__ ( _lowerCamelCase ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = "left"
def __init__( self , _a , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , _a = None , **_a , ):
"""simple docstring"""
lowerCamelCase = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
lowerCamelCase = 3
lowerCamelCase = do_lower_case
lowerCamelCase = remove_space
lowerCamelCase = keep_accents
lowerCamelCase = vocab_file
lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return len(self.sp_model )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase = self.__dict__.copy()
lowerCamelCase = None
return state
def __setstate__( self , _a ):
"""simple docstring"""
lowerCamelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase = {}
lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
if self.remove_space:
lowerCamelCase = """ """.join(inputs.strip().split() )
else:
lowerCamelCase = inputs
lowerCamelCase = outputs.replace("""``""" , """\"""" ).replace("""\'\'""" , """\"""" )
if not self.keep_accents:
lowerCamelCase = unicodedata.normalize("""NFKD""" , lowerCAmelCase_ )
lowerCamelCase = """""".join([c for c in outputs if not unicodedata.combining(lowerCAmelCase_ )] )
if self.do_lower_case:
lowerCamelCase = outputs.lower()
return outputs
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = self.preprocess_text(lowerCAmelCase_ )
lowerCamelCase = self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
lowerCamelCase = []
for piece in pieces:
if len(lowerCAmelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase = cur_pieces[1:]
else:
lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCAmelCase_ )
else:
new_pieces.append(lowerCAmelCase_ )
return new_pieces
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self.sp_model.PieceToId(lowerCAmelCase_ )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self.sp_model.IdToPiece(lowerCAmelCase_ )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = """""".join(lowerCAmelCase_ ).replace(lowerCAmelCase_ , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , _a , _a = False , _a = None , _a = True , **_a , ):
"""simple docstring"""
lowerCamelCase = kwargs.pop("""use_source_tokenizer""" , lowerCAmelCase_ )
lowerCamelCase = self.convert_ids_to_tokens(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase = []
lowerCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) )
lowerCamelCase = []
sub_texts.append(lowerCAmelCase_ )
else:
current_sub_text.append(lowerCAmelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase = """""".join(lowerCAmelCase_ )
lowerCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase = self.clean_up_tokenization(lowerCAmelCase_ )
return clean_text
else:
return text
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , _a , _a = None , _a = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is not None:
return ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1]
return ([0] * len(lowerCAmelCase_ )) + [1, 1]
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , """wb""" ) as fi:
lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
| 291 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowercase : Dict = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowercase : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowercase : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> tuple[str, float]:
_snake_case = len([g for position, g in enumerate(__A ) if g == main_target[position]] )
return (item, float(__A ))
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> tuple[str, str]:
_snake_case = random.randint(0 , len(__A ) - 1 )
_snake_case = parent_a[:random_slice] + parent_a[random_slice:]
_snake_case = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> str:
_snake_case = list(__A )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_snake_case = random.choice(__A )
return "".join(__A )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , ) -> list[str]:
_snake_case = []
# Generate more children proportionally to the fitness score.
_snake_case = int(parent_a[1] * 100 ) + 1
_snake_case = 10 if child_n >= 10 else child_n
for _ in range(__A ):
_snake_case = population_score[random.randint(0 , __A )][0]
_snake_case , _snake_case = crossover(parent_a[0] , __A )
# Append new string to the population list.
pop.append(mutate(__A , __A ) )
pop.append(mutate(__A , __A ) )
return pop
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
_snake_case = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(__A )
# Verify that the target contains no genes besides the ones inside genes variable.
_snake_case = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_snake_case = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(__A )
# Generate random starting population.
_snake_case = []
for _ in range(__A ):
population.append(''.join([random.choice(__A ) for i in range(len(__A ) )] ) )
# Just some logs to know what the algorithms is doing.
_snake_case , _snake_case = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__A )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_snake_case = [evaluate(__A , __A ) for item in population]
# Check if there is a matching evolution.
_snake_case = sorted(__A , key=lambda __A : x[1] , reverse=__A )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_snake_case = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__A )
# Normalize population score to be between 0 and 1.
_snake_case = [
(item, score / len(__A )) for item, score in population_score
]
# This is selection
for i in range(__A ):
population.extend(select(population_score[int(__A )] , __A , __A ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(__A ) > N_POPULATION:
break
if __name__ == "__main__":
lowercase : str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowercase : str = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowercase , lowercase , lowercase : Tuple = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 42 | 0 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__A : Tuple = logging.get_logger(__name__)
__A : List[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__A : str = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
__A : Optional[Any] = {
'facebook/blenderbot_small-90M': 512,
}
class __UpperCamelCase ( lowercase__ ):
lowercase : str = VOCAB_FILES_NAMES
lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Dict = BlenderbotSmallTokenizer
def __init__( self :str ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :Tuple="<|endoftext|>" ,_UpperCamelCase :int="<|endoftext|>" ,_UpperCamelCase :Dict="<|endoftext|>" ,_UpperCamelCase :Optional[Any]=False ,_UpperCamelCase :List[Any]=True ,**_UpperCamelCase :Any ,):
super().__init__(
ByteLevelBPETokenizer(
vocab=_UpperCamelCase ,merges=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,trim_offsets=_UpperCamelCase ,) ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,**_UpperCamelCase ,)
snake_case_ : Any = add_prefix_space
def a__ ( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any]=None ):
snake_case_ : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a__ ( self :int ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ):
snake_case_ : int = [self.sep_token_id]
snake_case_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 352 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
__A : int = logging.getLogger()
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
snake_case_ : int = parser.parse_args()
return args.f
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """all_results.json""" )
if os.path.exists(lowerCamelCase_ ):
with open(lowerCamelCase_ , """r""" ) as f:
snake_case_ : str = json.load(lowerCamelCase_ )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
__A : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __UpperCamelCase ( lowercase__ ):
@classmethod
def a__ ( cls :Dict ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
snake_case_ : Optional[int] = tempfile.mkdtemp()
snake_case_ : Any = os.path.join(cls.tmpdir ,"""default_config.yml""" )
write_basic_config(save_location=cls.configPath )
snake_case_ : List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def a__ ( cls :int ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Optional[int] ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : List[str] = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
snake_case_ : Dict = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Tuple ):
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : Tuple = F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
snake_case_ : Optional[int] = get_results(_UpperCamelCase )
self.assertLess(result["""perplexity"""] ,1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Tuple ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : List[str] = F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
self.assertLess(result["""perplexity"""] ,4_2 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :List[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
snake_case_ : Dict = 7 if get_gpu_count() > 1 else 2
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : str = F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : Optional[int] = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
self.assertLess(result["""train_loss"""] ,0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :List[str] ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : Optional[int] = F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] ,2_8 )
self.assertGreaterEqual(result["""eval_exact"""] ,2_8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :List[Any] ):
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : Union[str, Any] = F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : Union[str, Any] = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :int ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : List[Any] = F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : int = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_rouge1"""] ,1_0 )
self.assertGreaterEqual(result["""eval_rouge2"""] ,2 )
self.assertGreaterEqual(result["""eval_rougeL"""] ,7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :int ):
snake_case_ : Tuple = self.get_auto_remove_tmp_dir()
snake_case_ : Optional[Any] = F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : Any = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_bleu"""] ,3_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""translation_no_trainer""" ) ) )
@slow
def a__ ( self :Optional[Any] ):
snake_case_ : List[str] = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCamelCase )
snake_case_ : Dict = self.get_auto_remove_tmp_dir()
snake_case_ : Tuple = F'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Any ):
snake_case_ : Dict = self.get_auto_remove_tmp_dir()
snake_case_ : Tuple = F'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 8 | 0
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
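# Hedged inline check (illustrative, not part of the original module):
# strtobool mirrors the semantics of distutils.util.strtobool.
assert strtobool("YES") == 1
assert strtobool("off") == 0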
def is_tensor(x):
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
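# Hedged inline check (illustrative, not part of the original module): with
# plain numpy inputs the converters reduce to tolist() / np.array; only numpy
# is assumed installed here.
assert to_py_obj({"a": np.arange(3), "b": (1, 2)}) == {"a": [0, 1, 2], "b": [1, 2]}
assert to_numpy([1, 2, 3]).shape == (3,)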
class ModelOutput(OrderedDict):
    def __post_init__(self):
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
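# Hedged usage sketch (illustrative, not part of the original module):
# ContextManagers enters every manager in the list and unwinds them in
# reverse order on exit, mirroring contextlib.ExitStack.
@contextmanager
def _demo_cm(log: List[str], name: str):
    log.append(f"enter {name}")
    yield
    log.append(f"exit {name}")


_demo_log: List[str] = []
with ContextManagers([_demo_cm(_demo_log, "a"), _demo_cm(_demo_log, "b")]):
    _demo_log.append("body")
assert _demo_log == ["enter a", "enter b", "body", "exit b", "exit a"]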
def can_return_loss(model_class):
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class):
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
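# Hedged inline check (illustrative, not part of the original module): nested
# keys are joined with the delimiter, depth first.
assert flatten_dict({"a": {"b": 1}, "c": 2}) == {"a.b": 1, "c": 2}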
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")
def reshape(array, newshape):
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")
def squeeze(array, axis=None):
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")
def expand_dims(array, axis):
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")
def tensor_size(array):
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
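# Hedged inline check (illustrative, not part of the original module): the
# framework-agnostic ops dispatch on input type; numpy arrays take the numpy path.
_demo_arr = np.zeros((2, 1, 3))
assert transpose(_demo_arr).shape == (3, 1, 2)
assert reshape(_demo_arr, (3, 2)).shape == (3, 2)
assert squeeze(_demo_arr, axis=1).shape == (2, 3)
assert expand_dims(_demo_arr, 0).shape == (1, 2, 1, 3)
assert tensor_size(_demo_arr) == 6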
def add_model_info_to_auto_map(auto_map, repo_id):
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
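# Hedged inline check (illustrative, not part of the original module): repo ids
# are prefixed onto bare auto-map entries; already-qualified entries are kept.
assert add_model_info_to_auto_map(
    {"AutoModel": "modeling.MyModel", "AutoConfig": "org/other--configuration.MyConfig"}, "org/repo"
) == {"AutoModel": "org/repo--modeling.MyModel", "AutoConfig": "org/other--configuration.MyConfig"}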
def infer_framework(model_class):
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
| 256 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
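if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original script):
    # speed of sound in water at ~20 C in SI units, density ~998 kg/m^3 and
    # bulk modulus ~2.15e9 Pa; expected output is roughly 1467.7 m/s.
    print(speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9))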
if __name__ == "__main__":
import doctest
doctest.testmod()
| 256 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
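if __name__ == "__main__":
    # Hedged sketch (illustrative, not part of the original module; assumes the
    # surrounding `transformers` package context so the relative imports resolve):
    demo_config = ViTMSNConfig(image_size=224, patch_size=16)
    print(demo_config.num_hidden_layers)  # 12 by default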
| 361 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """simple docstring"""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """simple docstring"""
        # in case of overflow, map each `input_ids` sample back to its image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
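if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original module): the
    # checkpoint id is the published "microsoft/layoutlmv3-base"; the image path
    # below is hypothetical, and built-in OCR additionally requires pytesseract.
    from PIL import Image
    from transformers import LayoutLMv3Processor as _Processor

    demo_processor = _Processor.from_pretrained("microsoft/layoutlmv3-base")
    document = Image.open("document.png").convert("RGB")  # hypothetical file
    encoding = demo_processor(document, return_tensors="pt")
    print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']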
| 88 | 0 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__lowerCamelCase = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class UpperCamelCase__:
lowerCAmelCase__ : str
lowerCAmelCase__ : Optional[str] = None
lowerCAmelCase__ : Optional[Union[str, int]] = None
lowerCAmelCase__ : Optional[Union[str, int]] = None
lowerCAmelCase__ : Optional[Union[str, int]] = None
def snake_case__ ( self ) -> int:
A__ , A__ , A__ = _str_to_version_tuple(self.version_str )
def __repr__( self ) -> Union[str, Any]:
return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def snake_case__ ( self ) -> Union[str, Any]:
return self.major, self.minor, self.patch
def snake_case__ ( self ,__UpperCAmelCase ) -> List[Any]:
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
return Version(__UpperCAmelCase )
elif isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
return other
raise TypeError(f'''{other} (type {type(__UpperCAmelCase )}) cannot be compared to version.''' )
def __eq__( self ,__UpperCAmelCase ) -> Optional[int]:
try:
A__ = self._validate_operand(__UpperCAmelCase )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self ,__UpperCAmelCase ) -> Optional[Any]:
A__ = self._validate_operand(__UpperCAmelCase )
return self.tuple < other.tuple
def __hash__( self ) -> Union[str, Any]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def snake_case__ ( cls ,__UpperCAmelCase ) -> Dict:
A__ = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def snake_case__ ( self ) -> str:
return self.version_str
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ = _VERSION_REG.match(UpperCamelCase__ )
if not res:
raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(UpperCamelCase__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return ".".join(str(UpperCamelCase__ ) for v in version_tuple )
| 221 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """simple docstring"""
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 221 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """simple docstring"""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 331 |
'''simple docstring'''
class matrix:  # Public class to implement a graph
    """simple docstring"""

    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        """simple docstring"""
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """simple docstring"""
        # a cell can be visited if it is inside the grid, unvisited, and land
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        """simple docstring"""
        # depth-first search over all 8 neighbours of cell (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        """simple docstring"""
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
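if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original module): two
    # of the five islands below are only diagonally connected, which the
    # 8-neighbour DFS above still merges into single islands.
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print(matrix(5, 5, grid).count_islands())  # 5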
| 331 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
def __init__( self : str, lowerCAmelCase : Tuple, lowerCAmelCase : Dict=13, lowerCAmelCase : str=7, lowerCAmelCase : Any=True, lowerCAmelCase : Any=True, lowerCAmelCase : int=True, lowerCAmelCase : Union[str, Any]=True, lowerCAmelCase : Tuple=99, lowerCAmelCase : Optional[int]=32, lowerCAmelCase : int=5, lowerCAmelCase : Dict=4, lowerCAmelCase : Tuple=37, lowerCAmelCase : Dict="gelu", lowerCAmelCase : Tuple=0.1, lowerCAmelCase : Union[str, Any]=0.1, lowerCAmelCase : List[Any]=128, lowerCAmelCase : Optional[int]=32, lowerCAmelCase : Tuple=16, lowerCAmelCase : List[Any]=2, lowerCAmelCase : Union[str, Any]=0.02, lowerCAmelCase : Dict=3, lowerCAmelCase : str=4, lowerCAmelCase : List[str]=None, ) -> List[str]:
lowercase : Optional[int] = parent
lowercase : int = batch_size
lowercase : List[Any] = seq_length
lowercase : Optional[Any] = is_training
lowercase : Tuple = use_input_mask
lowercase : List[Any] = use_token_type_ids
lowercase : Dict = use_labels
lowercase : List[str] = vocab_size
lowercase : List[Any] = hidden_size
lowercase : Optional[Any] = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : Optional[int] = intermediate_size
lowercase : Tuple = hidden_act
lowercase : Any = hidden_dropout_prob
lowercase : str = attention_probs_dropout_prob
lowercase : Tuple = max_position_embeddings
lowercase : List[Any] = type_vocab_size
lowercase : Union[str, Any] = type_sequence_label_size
lowercase : Any = initializer_range
lowercase : Tuple = num_labels
lowercase : List[str] = num_choices
lowercase : Tuple = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def lowercase ( self : Any, lowerCAmelCase : List[Any], lowerCAmelCase : List[str], lowerCAmelCase : int, lowerCAmelCase : Union[str, Any], lowerCAmelCase : List[Any], lowerCAmelCase : int, lowerCAmelCase : Union[str, Any] ) -> Dict:
lowercase : Optional[Any] = NezhaForMaskedLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowercase : Optional[int] = model(lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Dict, lowerCAmelCase : Dict, lowerCAmelCase : Optional[Any], lowerCAmelCase : Any, lowerCAmelCase : int, lowerCAmelCase : str, lowerCAmelCase : Optional[Any], lowerCAmelCase : List[str] ) -> List[str]:
lowercase : List[str] = NezhaForNextSentencePrediction(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowercase : List[Any] = model(
lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def lowercase ( self : int, lowerCAmelCase : List[str], lowerCAmelCase : str, lowerCAmelCase : Any, lowerCAmelCase : List[Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : int, lowerCAmelCase : int ) -> Optional[Any]:
lowercase : Union[str, Any] = NezhaForPreTraining(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowercase : List[str] = model(
lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, next_sentence_label=lowerCAmelCase, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def lowercase ( self : str, lowerCAmelCase : str, lowerCAmelCase : Any, lowerCAmelCase : Optional[Any], lowerCAmelCase : List[Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : int, lowerCAmelCase : str ) -> int:
lowercase : Union[str, Any] = NezhaForQuestionAnswering(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowercase : Any = model(
lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase ( self : Optional[Any], lowerCAmelCase : Tuple, lowerCAmelCase : Tuple, lowerCAmelCase : Tuple, lowerCAmelCase : Optional[Any], lowerCAmelCase : int, lowerCAmelCase : List[str], lowerCAmelCase : Any ) -> Optional[int]:
lowercase : Dict = self.num_labels
lowercase : Union[str, Any] = NezhaForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowercase : Union[str, Any] = model(lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase ( self : List[str], lowerCAmelCase : Tuple, lowerCAmelCase : List[str], lowerCAmelCase : Optional[Any], lowerCAmelCase : List[str], lowerCAmelCase : int, lowerCAmelCase : List[Any], lowerCAmelCase : Union[str, Any] ) -> int:
lowercase : Optional[Any] = self.num_labels
lowercase : Dict = NezhaForTokenClassification(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowercase : List[Any] = model(lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : Union[str, Any], lowerCAmelCase : str, lowerCAmelCase : Optional[int], lowerCAmelCase : Dict, lowerCAmelCase : List[str], lowerCAmelCase : Dict, lowerCAmelCase : str, lowerCAmelCase : Tuple ) -> Any:
lowercase : Dict = self.num_choices
lowercase : str = NezhaForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowercase : List[str] = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase : List[str] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase : str = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase : str = model(
lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, unittest.TestCase ):
_lowerCamelCase = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase = True
def lowercase ( self : int, lowerCAmelCase : Union[str, Any], lowerCAmelCase : List[str], lowerCAmelCase : str=False ) -> Optional[int]:
lowercase : Any = super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase )
if return_labels:
if model_class in get_values(lowerCAmelCase ):
lowercase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCAmelCase )
lowercase : Tuple = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
def lowercase ( self : List[Any] ) -> Dict:
self.config_tester.run_common_tests()
def lowercase ( self : int ) -> List[str]:
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowercase ( self : Any ) -> List[str]:
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase )
def lowercase ( self : List[Any] ) -> str:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def lowercase ( self : List[str] ) -> int:
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase )
def lowercase ( self : Union[str, Any] ) -> Any:
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowerCAmelCase )
def lowercase ( self : int ) -> List[Any]:
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase )
def lowercase ( self : str ) -> Optional[Any]:
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
def lowercase ( self : str ) -> Optional[Any]:
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
def lowercase ( self : str ) -> Dict:
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
@slow
def lowercase ( self : Dict ) -> Optional[int]:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Union[str, Any] = NezhaModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@slow
@require_torch_gpu
def lowercase ( self : Optional[int] ) -> Any:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
lowercase : Optional[Any] = True
lowercase : List[str] = model_class(config=lowerCAmelCase )
lowercase : Union[str, Any] = self._prepare_for_class(lowerCAmelCase, lowerCAmelCase )
lowercase : List[Any] = torch.jit.trace(
lowerCAmelCase, (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, 'bert.pt' ) )
lowercase : Optional[Any] = torch.jit.load(os.path.join(lowerCAmelCase, 'bert.pt' ), map_location=lowerCAmelCase )
loaded(inputs_dict['input_ids'].to(lowerCAmelCase ), inputs_dict['attention_mask'].to(lowerCAmelCase ) )
@require_torch
class a__ ( unittest.TestCase ):
@slow
def lowercase ( self : List[Any] ) -> Optional[int]:
lowercase : Any = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
lowercase : Optional[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : Tuple = model(lowerCAmelCase, attention_mask=lowerCAmelCase )[0]
lowercase : Any = torch.Size((1, 6, 768) )
self.assertEqual(output.shape, lowerCAmelCase )
lowercase : List[str] = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], lowerCAmelCase, atol=1e-4 ) )
@slow
def lowercase ( self : str ) -> int:
lowercase : List[str] = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase : Tuple = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : Union[str, Any] = model(lowerCAmelCase, attention_mask=lowerCAmelCase )[0]
lowercase : Union[str, Any] = torch.Size((1, 6, 21128) )
self.assertEqual(output.shape, lowerCAmelCase )
lowercase : int = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], lowerCAmelCase, atol=1e-4 ) )
| 255 |
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
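def solution_closed_form(n: int = 100) -> int:
    # Hedged alternative sketch (not part of the original solution): the same
    # value via the closed forms sum = n(n+1)/2 and sum of squares =
    # n(n+1)(2n+1)/6; for n = 100 both functions return 25164150.
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares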
if __name__ == "__main__":
print(f'''{solution() = }''')
| 255 | 1 |
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """simple docstring"""
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 317 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text, pattern):
        '''simple docstring'''
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char):
        '''simple docstring'''
        # index of the last occurrence of `char` in the pattern, or -1
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos):
        '''simple docstring'''
        # text index of the rightmost mismatch for this alignment, or -1
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self):
        '''simple docstring'''
        # slides the pattern over the text using the bad-character rule
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
    print(positions)
| 317 | 1
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 303 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 303 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: Union[str, Any]=7 , UpperCAmelCase_: Tuple=3 , UpperCAmelCase_: Union[str, Any]=18 , UpperCAmelCase_: List[Any]=30 , UpperCAmelCase_: Any=400 , UpperCAmelCase_: str=True , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: Dict=True , UpperCAmelCase_: str=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 20}
_SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = min_resolution
_SCREAMING_SNAKE_CASE = max_resolution
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size
_SCREAMING_SNAKE_CASE = do_center_crop
_SCREAMING_SNAKE_CASE = crop_size
def UpperCamelCase ( self: str ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MobileNetVaImageProcessingTester(self )
@property
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_center_crop""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """crop_size""" ) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
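if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original tests): the
    # processors exercised here are published in `transformers` as
    # MobileNetV1ImageProcessor / MobileNetV2ImageProcessor; V1 is assumed below.
    from transformers import MobileNetV1ImageProcessor

    demo_processor = MobileNetV1ImageProcessor(
        size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
    )
    demo_image = np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8)
    demo_pixels = demo_processor(demo_image, return_tensors="np").pixel_values
    print(demo_pixels.shape)  # (1, 3, 18, 18)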
| 125 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
UpperCamelCase = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
UpperCamelCase = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
UpperCamelCase = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool( v ) -> bool:
    """simple docstring"""
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("""boolean value expected""" )
def convert_resnet( checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,has_skip=False ) -> dict:
    """simple docstring"""
    new_checkpoint[F'{new_prefix}.norm1.weight'] = checkpoint[F'{old_prefix}.in_layers.0.weight']
    new_checkpoint[F'{new_prefix}.norm1.bias'] = checkpoint[F'{old_prefix}.in_layers.0.bias']
    new_checkpoint[F'{new_prefix}.conv1.weight'] = checkpoint[F'{old_prefix}.in_layers.2.weight']
    new_checkpoint[F'{new_prefix}.conv1.bias'] = checkpoint[F'{old_prefix}.in_layers.2.bias']
    new_checkpoint[F'{new_prefix}.time_emb_proj.weight'] = checkpoint[F'{old_prefix}.emb_layers.1.weight']
    new_checkpoint[F'{new_prefix}.time_emb_proj.bias'] = checkpoint[F'{old_prefix}.emb_layers.1.bias']
    new_checkpoint[F'{new_prefix}.norm2.weight'] = checkpoint[F'{old_prefix}.out_layers.0.weight']
    new_checkpoint[F'{new_prefix}.norm2.bias'] = checkpoint[F'{old_prefix}.out_layers.0.bias']
    new_checkpoint[F'{new_prefix}.conv2.weight'] = checkpoint[F'{old_prefix}.out_layers.3.weight']
    new_checkpoint[F'{new_prefix}.conv2.bias'] = checkpoint[F'{old_prefix}.out_layers.3.bias']
    if has_skip:
        new_checkpoint[F'{new_prefix}.conv_shortcut.weight'] = checkpoint[F'{old_prefix}.skip_connection.weight']
        new_checkpoint[F'{new_prefix}.conv_shortcut.bias'] = checkpoint[F'{old_prefix}.skip_connection.bias']
    return new_checkpoint
def convert_attention( checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,attention_dim=None ) -> dict:
    """simple docstring"""
    weight_q , weight_k , weight_v = checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 ,dim=0 )
    bias_q , bias_k , bias_v = checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 ,dim=0 )
    new_checkpoint[F'{new_prefix}.group_norm.weight'] = checkpoint[F'{old_prefix}.norm.weight']
    new_checkpoint[F'{new_prefix}.group_norm.bias'] = checkpoint[F'{old_prefix}.norm.bias']
    new_checkpoint[F'{new_prefix}.to_q.weight'] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_q.bias'] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_k.weight'] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_k.bias'] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_v.weight'] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_v.bias'] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_out.0.weight'] = (
        checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[F'{new_prefix}.to_out.0.bias'] = checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
def con_pt_to_diffuser( checkpoint_path ,unet_config ) -> dict:
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path ,map_location="""cpu""" )
    new_checkpoint = {}
    new_checkpoint["""time_embedding.linear_1.weight"""] = checkpoint["""time_embed.0.weight"""]
    new_checkpoint["""time_embedding.linear_1.bias"""] = checkpoint["""time_embed.0.bias"""]
    new_checkpoint["""time_embedding.linear_2.weight"""] = checkpoint["""time_embed.2.weight"""]
    new_checkpoint["""time_embedding.linear_2.bias"""] = checkpoint["""time_embed.2.bias"""]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["""class_embedding.weight"""] = checkpoint["""label_emb.weight"""]
    new_checkpoint["""conv_in.weight"""] = checkpoint["""input_blocks.0.0.weight"""]
    new_checkpoint["""conv_in.bias"""] = checkpoint["""input_blocks.0.0.bias"""]
    down_block_types = unet_config["""down_block_types"""]
    layers_per_block = unet_config["""layers_per_block"""]
    attention_head_dim = unet_config["""attention_head_dim"""]
    channels_list = unet_config["""block_out_channels"""]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'down_blocks.{i}.resnets.{j}'
                old_prefix = F'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,has_skip=has_skip )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'down_blocks.{i}.resnets.{j}'
                old_prefix = F'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,has_skip=has_skip )
                new_prefix = F'down_blocks.{i}.attentions.{j}'
                old_prefix = F'input_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,attention_head_dim )
                current_layer += 1
        if i != len(down_block_types ) - 1:
            new_prefix = F'down_blocks.{i}.downsamplers.0'
            old_prefix = F'input_blocks.{current_layer}.0'
            new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix )
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = """mid_block.resnets.0"""
    old_prefix = """middle_block.0"""
    new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix )
    new_prefix = """mid_block.attentions.0"""
    old_prefix = """middle_block.1"""
    new_checkpoint = convert_attention(checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,attention_head_dim )
    new_prefix = """mid_block.resnets.1"""
    old_prefix = """middle_block.2"""
    new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix )
    current_layer = 0
    up_block_types = unet_config["""up_block_types"""]
    for i, layer_type in enumerate(up_block_types ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'up_blocks.{i}.resnets.{j}'
                old_prefix = F'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,has_skip=True )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'up_blocks.{i}.upsamplers.0'
                old_prefix = F'output_blocks.{current_layer-1}.1'
                new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'up_blocks.{i}.resnets.{j}'
                old_prefix = F'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,has_skip=True )
                new_prefix = F'up_blocks.{i}.attentions.{j}'
                old_prefix = F'output_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,attention_head_dim )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'up_blocks.{i}.upsamplers.0'
                old_prefix = F'output_blocks.{current_layer-1}.2'
                new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix )
    new_checkpoint["""conv_norm_out.weight"""] = checkpoint["""out.0.weight"""]
    new_checkpoint["""conv_norm_out.bias"""] = checkpoint["""out.0.bias"""]
    new_checkpoint["""conv_out.weight"""] = checkpoint["""out.2.weight"""]
    new_checkpoint["""conv_out.bias"""] = checkpoint["""out.2.bias"""]
    return new_checkpoint
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
UpperCamelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
UpperCamelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
UpperCamelCase = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
UpperCamelCase = None
UpperCamelCase = con_pt_to_diffuser(args.unet_path, unet_config)
UpperCamelCase = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
UpperCamelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
UpperCamelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
UpperCamelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
UpperCamelCase = CMStochasticIterativeScheduler(**scheduler_config)
UpperCamelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
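# ---------------------------------------------------------------------------
# Usage sketch (not part of the conversion script): once the script above has
# been run, the dumped pipeline can be reloaded with diffusers. The checkpoint
# filename, output directory, and class label below are illustrative
# assumptions, and the example is commented out so it does not run on import.
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True
#
# import torch
# from diffusers import ConsistencyModelPipeline
#
# pipe = ConsistencyModelPipeline.from_pretrained("./consistency-model-imagenet64")
# pipe.to("cuda" if torch.cuda.is_available() else "cpu")
# # One-step (distilled) sampling; class label 145 is an arbitrary ImageNet class.
# image = pipe(num_inference_steps=1, class_labels=145).images[0]
# image.save("sample.png")
# ---------------------------------------------------------------------------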
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        """simple docstring"""
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp( self ):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
    def test_multi_gpu_data_parallel_forward( self ):
"""simple docstring"""
pass
    def test_config( self ):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        return
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_backbone( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    @unittest.skip('Swin does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass

    @unittest.skip('Swin does not support feedforward chunking' )
    def test_feed_forward_chunking( self ):
        """simple docstring"""
        pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        """simple docstring"""
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
    def test_model_outputs_equivalence( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()

                def recursive_check(tuple_object , dict_object ):
                    if isinstance(tuple_object , (List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif isinstance(tuple_object , Dict ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values() ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1e-5 ) , msg=(
                                'Tuple and dict output are not equal. Difference:'
                                f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has"
                                f" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}."
                            ) , )

                recursive_check(tuple_output , dict_output )

        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
@require_torch
class MaskFormerSwinBackboneTest( BackboneTesterMixin , unittest.TestCase):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp( self ):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self )

    def test_backbone_outputs( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['pixel_values'].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , _ , h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
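# To exercise these tests locally, something like the following works (the
# test-file path is an assumption based on the transformers repo layout):
#   python -m pytest tests/models/maskformer/test_modeling_maskformer_swin.py -k backbone -v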
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification( TaskTemplate ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True})
    input_schema : ClassVar[Features] = Features({'''text''': Value('''string''')})
    label_schema : ClassVar[Features] = Features({'''labels''': ClassLabel})
    text_column : str = "text"
    label_column : str = "labels"

    def align_with_features( self , features ):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
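# Usage sketch for the task template above (the dataset column names are
# illustrative, and the example is commented out so it does not run on import):
# aligning the template against a dataset's features fills in the concrete
# ClassLabel for "labels".
#
# from datasets import ClassLabel, Features, Value
#
# features = Features({"review": Value("string"), "sentiment": ClassLabel(names=["neg", "pos"])})
# template = TextClassification(text_column="review", label_column="sentiment")
# aligned = template.align_with_features(features)
# # aligned.label_schema is now Features({"labels": ClassLabel(names=["neg", "pos"])})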
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowercase = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig( datasets.BuilderConfig ):
    '''simple docstring'''
    batch_size : int = 10_000
    columns : Optional[List[str]] = None
    features : Optional[datasets.Features] = None


class Parquet( datasets.ArrowBasedBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files , (str, list, tuple)):
            files = data_files
            if isinstance(files , str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file , '''rb''') as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files}))
        return splits
    def _cast_table( self , pa_table ) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema)
        return pa_table

    def _generate_tables( self , files ):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file , '''rb''') as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"""{file_idx}_{batch_idx}""", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                    raise
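# Usage sketch: this builder is what backs `load_dataset("parquet", ...)`.
# The file names below are placeholders, and the example is commented out so
# it does not run on import.
#
# from datasets import load_dataset
#
# ds = load_dataset("parquet", data_files={"train": "train.parquet", "test": "test.parquet"})
# # Column projection flows through ParquetConfig.columns down to ParquetFile.iter_batches:
# subset = load_dataset("parquet", data_files="train.parquet", columns=["id", "text"], split="train")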
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
__lowercase = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig :
'''simple docstring'''
    def __init__( self , load_in_8bit=False , load_in_4bit=False , llm_int8_threshold=6.0 , llm_int8_skip_modules=None , llm_int8_enable_fp32_cpu_offload=False , llm_int8_has_fp16_weight=False , bnb_4bit_compute_dtype=None , bnb_4bit_quant_type="fp4" , bnb_4bit_use_double_quant=False , **kwargs , ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''')
        self.post_init()
    def post_init( self):
        if not isinstance(self.llm_int8_threshold , float):
            raise ValueError('''llm_int8_threshold must be a float''')
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list):
            raise ValueError('''llm_int8_skip_modules must be a list of strings''')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool):
            raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''')
        if not isinstance(self.llm_int8_has_fp16_weight , bool):
            raise ValueError('''llm_int8_has_fp16_weight must be a boolean''')
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype):
            raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''')
        if not isinstance(self.bnb_4bit_quant_type , str):
            raise ValueError('''bnb_4bit_quant_type must be a string''')
        if not isinstance(self.bnb_4bit_use_double_quant , bool):
            raise ValueError('''bnb_4bit_use_double_quant must be a boolean''')
        if self.load_in_4bit and not version.parse(importlib.metadata.version('''bitsandbytes''')) >= version.parse(
            '''0.39.0'''):
            raise ValueError(
                '''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''')

    def is_quantizable( self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method( self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict( cls , config_dict , return_unused_kwargs , **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key):
                setattr(config , key , value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key , None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file( self , json_file_path):
        with open(json_file_path , '''w''' , encoding='''utf-8''') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True) + '''\n'''
            writer.write(json_string)

    def to_dict( self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['''bnb_4bit_compute_dtype'''] = str(output['''bnb_4bit_compute_dtype''']).split('''.''')[1]
        return output

    def __repr__( self) -> str:
        return f"""{self.__class__.__name__} {self.to_json_string()}"""
    def to_json_string( self , use_diff = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True) + "\n"

    def to_diff_dict( self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
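# Usage sketch (assumes `bitsandbytes` is installed and a CUDA GPU is
# available; the checkpoint name is just an example, and the block is
# commented out so it does not run on import):
#
# import torch
# from transformers import AutoModelForCausalLM
#
# nf4_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_use_double_quant=True,
#     bnb_4bit_compute_dtype=torch.bfloat16,
# )
# model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=nf4_config)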
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    url = f'''https://www.google.com/search?q={query}&num=100'''
    res = requests.get(
        url,
        headers={'User-Agent': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'yuRUbf'})
            .find('a')
            .get('href')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'kCrYT'})
            .find('a')
            .get('href')
        )['url'][0]
webbrowser.open(link)
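# Example invocation (the script filename is an assumption):
#   python crawl_google_results.py "python programming"
# With no argument, the script prompts for a search query interactively and
# then opens the first result in the default browser.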
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCamelCase__ = logging.getLogger(__name__)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data." )
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use." )
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix." )
    args = parser.parse_args()

    logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"""Loading text from {args.file_path}""" )
    with open(args.file_path, "r", encoding="utf8" ) as fp:
        data = fp.readlines()

    logger.info("Start encoding" )
    logger.info(f"""{len(data )} examples to process.""" )
    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"""{bos} {text.strip()} {sep}"""
        token_ids = tokenizer.encode(text, add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
            start = time.time()
    logger.info("Finished binarization" )
    logger.info(f"""{len(data )} examples processed.""" )

    dp_file = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f"""Dump to {dp_file}""" )
    with open(dp_file, "wb" ) as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
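# Sketch of reading the binarized dump back (the path reflects the default
# --dump_file/--tokenizer_name values above; commented out so it does not run
# on import):
#
# import pickle
#
# with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
#     sequences = pickle.load(handle)
# print(len(sequences), "token-id sequences; first one:", list(sequences[0][:10]))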
"""simple docstring"""
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == """fp8""") , )
    return train_dataloader, eval_dataloader
def training_function(config , args ):
    '''simple docstring'''
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
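# This script is usually driven through the `accelerate` CLI; the filename
# below is an assumption:
#   accelerate config                                  # one-time interactive setup
#   accelerate launch nlp_example.py --mixed_precision fp16
# Plain `python nlp_example.py` also works for single-process CPU/GPU runs.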
"""simple docstring"""
import os
from math import log10


def solution( data_file : str = "base_exp.txt" ) -> int:
    '''simple docstring'''
    largest : float = 0
    result : int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(""",""" ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
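# Sanity check of the logarithm trick used above: comparing exp * log10(base)
# ranks the numbers without materializing the huge powers themselves
# (commented out so it does not run on import):
#
# from math import log10
#
# assert (2**100 > 3**60) == (100 * log10(2) > 60 * log10(3))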
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase_ ( OnnxPipelineTesterMixin , unittest.TestCase):
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self, seed=0):
        '''simple docstring'''
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase : int = self.get_dummy_inputs()
_lowerCAmelCase : int = pipe(**a__).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : int = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087])
assert np.abs(image_slice - expected_slice).max() < 1E-1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : Union[str, Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=a__)
pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase : Dict = self.get_dummy_inputs()
_lowerCAmelCase : Optional[int] = pipe(**a__).images
_lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : int = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=a__)
# warmup pass to apply optimizations
_lowerCAmelCase : int = pipe(**self.get_dummy_inputs())
_lowerCAmelCase : Dict = self.get_dummy_inputs()
_lowerCAmelCase : List[str] = pipe(**a__).images
_lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Optional[int] = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase : List[Any] = self.get_dummy_inputs()
_lowerCAmelCase : str = pipe(**a__).images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : List[str] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase : Optional[int] = self.get_dummy_inputs()
_lowerCAmelCase : Optional[Any] = pipe(**a__).images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Union[str, Any] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
_lowerCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase : Tuple = self.get_dummy_inputs()
_lowerCAmelCase : List[str] = pipe(**a__).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase : Union[str, Any] = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ort.SessionOptions()
_lowerCAmelCase : List[Any] = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
_lowerCAmelCase : List[Any] = init_image.resize((768, 512))
# using the PNDM scheduler by default
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=a__, feature_extractor=a__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase : Optional[int] = "A fantasy landscape, trending on artstation"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : int = pipe(
prompt=a__, image=a__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=a__, output_type="np", )
_lowerCAmelCase : Dict = output.images
_lowerCAmelCase : List[str] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_lowerCAmelCase : Union[str, Any] = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
_lowerCAmelCase : str = init_image.resize((768, 512))
_lowerCAmelCase : Optional[int] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
_lowerCAmelCase : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=a__, safety_checker=a__, feature_extractor=a__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase : Dict = "A fantasy landscape, trending on artstation"
_lowerCAmelCase : int = np.random.RandomState(0)
_lowerCAmelCase : Optional[int] = pipe(
prompt=a__, image=a__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=a__, output_type="np", )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_lowerCAmelCase : Tuple = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
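# Editorial sketch (not part of the test file above): the pattern these tests
# exercise is rebuilding a scheduler from the pipeline's existing scheduler
# config, so only the sampling algorithm changes while every other component
# stays the same. Class and checkpoint names below are illustrative of the
# diffusers ONNX API, not taken verbatim from the tests.
#
#   from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionImg2ImgPipeline
#   pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
#   )
#   pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)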
| 36 |
from math import log2
def lowerCamelCase__ ( a : int ) -> int:
    # Validate the type before the sign check so non-integers fail loudly.
    if not isinstance(a , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    if a < 0:
        raise ValueError('''Input value must be a positive integer''' )
    # a & -a isolates the lowest set bit; log2 of it gives that bit's index.
    return 0 if (a == 0) else int(log2(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
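# Editorial sanity check for the bit trick above: a brute-force count of
# trailing zero bits must agree with int(log2(a & -a)) for every positive input.
def _lowest_set_bit_index_bruteforce(a: int) -> int:
    index = 0
    while a % 2 == 0:
        a //= 2
        index += 1
    return index
if __name__ == "__main__":
    assert all(lowerCamelCase__(n) == _lowest_set_bit_index_bruteforce(n) for n in range(1, 64))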
| 24 | 0 |
'''simple docstring'''
def temp_input_value( min_val : int = 10, max_val : int = 1000, option : bool = True ) -> int:
    """Return one of the two validated bounds (min_val if option else max_val)."""
    assert (
        isinstance(min_val, int )
        and isinstance(max_val, int )
        and isinstance(option, bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
    return min_val if option else max_val
def get_avg( number_1 : int, number_2 : int ) -> int:
    """Integer midpoint of two numbers."""
    return int((number_1 + number_2) / 2 )
def guess_the_number( lower : int, higher : int, to_guess : int ) -> None:
    """Locate ``to_guess`` inside [lower, higher] by repeated bisection."""
    assert (
        isinstance(lower, int ) and isinstance(higher, int ) and isinstance(to_guess, int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('''argument value for lower must be less than higher''' )
    if not lower < to_guess < higher:
        raise ValueError(
            '''guess value must be within the range of lower and higher value''' )
    def answer(number : int ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print('''started...''' )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(F"""guess the number : {last_numbers[-1]}""" )
    print(F"""details : {last_numbers!s}""" )
def main() -> None:
    """Interactive entry point: read bounds and a target, then run the search."""
    lower = int(input('''Enter lower value : ''' ).strip() )
    higher = int(input('''Enter high value : ''' ).strip() )
    guess = int(input('''Enter value to guess : ''' ).strip() )
    guess_the_number(lower, higher, guess )
if __name__ == "__main__":
    main()
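# Editorial note: the loop in guess_the_number is plain bisection, so it needs
# at most ceil(log2(higher - lower)) probes. A non-interactive run such as
#   guess_the_number(1, 1000, 17)
# prints the probe sequence 500, 250, 125, 63, 32, 16, 24, 20, 18, 17.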
| 287 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> Dict:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxRobertaModelTester(self )
@slow
    def test_model_from_pretrained( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''roberta-base''' , from_pt=lowerCAmelCase__ )
_UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase__ )
| 287 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
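# Editorial sketch of the "shortest edge" rule that get_expected_values above
# mirrors: scale the image so its shorter side hits `shortest_edge` while
# preserving the aspect ratio (the longest_edge cap is ignored here for brevity).
def _shortest_edge_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge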
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
_lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , """image_mean""" ) )
self.assertTrue(hasattr(a__ , """image_std""" ) )
self.assertTrue(hasattr(a__ , """do_normalize""" ) )
self.assertTrue(hasattr(a__ , """do_resize""" ) )
self.assertTrue(hasattr(a__ , """size""" ) )
def __A ( self ):
_lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , a__ )
_lowerCAmelCase : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=a__ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , a__ )
def __A ( self ):
pass
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase : str = self.image_processor_tester.get_expected_values(a__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(a__ , batched=a__ )
_lowerCAmelCase : Optional[Any] = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
_lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(a__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase : str = image_processing(a__ , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase : int = self.image_processor_tester.get_expected_values(a__ , batched=a__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
_lowerCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(a__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase : Tuple = image_processing(a__ , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(a__ , batched=a__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __A ( self ):
# prepare image and target
_lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
_lowerCAmelCase : Optional[Any] = json.loads(f.read() )
_lowerCAmelCase : Optional[int] = {"""image_id""": 39769, """annotations""": target}
# encode them
_lowerCAmelCase : Union[str, Any] = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
_lowerCAmelCase : Any = image_processing(images=a__ , annotations=a__ , return_tensors="""pt""" )
# verify pixel values
_lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , a__ )
_lowerCAmelCase : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , a__ , atol=1e-4 ) )
# verify area
_lowerCAmelCase : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , a__ ) )
# verify boxes
_lowerCAmelCase : str = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , a__ )
_lowerCAmelCase : Union[str, Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , a__ , atol=1e-3 ) )
# verify image_id
_lowerCAmelCase : Any = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , a__ ) )
# verify is_crowd
_lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , a__ ) )
# verify class_labels
_lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , a__ ) )
# verify orig_size
_lowerCAmelCase : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , a__ ) )
# verify size
_lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , a__ ) )
@slow
def __A ( self ):
# prepare image, target and masks_path
_lowerCAmelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
_lowerCAmelCase : str = json.loads(f.read() )
_lowerCAmelCase : Dict = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
_lowerCAmelCase : Tuple = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
_lowerCAmelCase : List[str] = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
_lowerCAmelCase : str = image_processing(images=a__ , annotations=a__ , masks_path=a__ , return_tensors="""pt""" )
# verify pixel values
_lowerCAmelCase : List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , a__ )
_lowerCAmelCase : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , a__ , atol=1e-4 ) )
# verify area
_lowerCAmelCase : List[str] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , a__ ) )
# verify boxes
_lowerCAmelCase : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , a__ )
_lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , a__ , atol=1e-3 ) )
# verify image_id
_lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , a__ ) )
# verify is_crowd
_lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , a__ ) )
# verify class_labels
_lowerCAmelCase : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , a__ ) )
# verify masks
_lowerCAmelCase : int = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , a__ )
# verify orig_size
_lowerCAmelCase : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , a__ ) )
# verify size
_lowerCAmelCase : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , a__ ) )
| 44 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase :Tuple = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ) -> Any:
        super().__init__(*args , **kwargs )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ) -> List[str]:
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params['prompt'] = prompt
        if generate_kwargs is not None:
            forward_kwargs['generate_kwargs'] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs['generate_kwargs'] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
                    ' please use only one' )
            forward_kwargs['generate_kwargs']['max_new_tokens'] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ) -> int:
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ) -> Dict:
        image = load_image(image )
if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    F'Received an invalid text input, got - {type(prompt )} - but expected a single string. '
'Note also that one single text can be provided for conditional image to text generation.' )
        model_type = self.model.config.model_type
if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation' )
else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
            model_inputs['input_ids'] = None
return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ) -> Any:
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
            and isinstance(model_inputs['input_ids'] , list )
and all(x is None for x in model_inputs['input_ids'] )
):
            model_inputs['input_ids'] = None
if generate_kwargs is None:
            generate_kwargs = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ) -> Optional[Any]:
        records = []
        for output_ids in model_outputs:
            record = {
                'generated_text': self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
| 331 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
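# Editorial walkthrough of the toy merges above: BPE starts from characters
# (with an end-of-word marker) and repeatedly applies the lowest-ranked merge.
# "adapt":  a d a p t</w> -> a d ap t</w> -> a d apt</w> -> ad apt</w> -> adapt</w>
# "react" only matches the "r e" merge, leaving re a c t</w>, which is why the
# expected tokenization in the test reads "re@@ a@@ c@@ t".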
| 147 |
'''simple docstring'''
import numpy as np
def UpperCAmelCase_ ( f , ya , xa , h , x_end ):
    # classic fourth-order Runge-Kutta: integrate y' = f(x, y) from xa to x_end
    n = int(np.ceil((x_end - xa) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # four slope samples: start of step, two midpoints, end of step
        ka = f(x , y[k] )
        kb = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h , y[k] + 0.5 * h * kb )
        kd = f(x + h , y[k] + h * kc )
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
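# Editorial usage sketch for the integrator above: solve y' = y with y(0) = 1
# up to x = 1; RK4's global error is O(h^4), so h = 0.01 already matches e to
# roughly 1e-8.
if __name__ == "__main__":
    approx_e = UpperCAmelCase_(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]
    print(approx_e)  # ~2.718281828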
| 147 | 1 |
'''simple docstring'''
from ....utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""
    def __init__( self , config , num_labels=None , modal_hidden_size=20_48 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 63 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
"""simple docstring"""
__a ='trocr'
__a =['past_key_values']
__a ={
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
    def __init__( self , vocab_size=5_02_65 , d_model=10_24 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=40_96 , activation_function="gelu" , max_position_embeddings=5_12 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.02 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
_a = vocab_size
_a = d_model
_a = decoder_layers
_a = decoder_attention_heads
_a = decoder_ffn_dim
_a = activation_function
_a = max_position_embeddings
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = init_std
_a = decoder_layerdrop
_a = use_cache
_a = scale_embedding
_a = use_learned_position_embeddings
_a = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 63 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
UpperCAmelCase_ :List[Any] = "llama"
UpperCAmelCase_ :str = ["past_key_values"]
    def __init__( self , vocab_size=3_2000 , hidden_size=4096 , intermediate_size=1_1008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.0_2 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ) -> List[str]:
lowerCAmelCase_ :str = vocab_size
lowerCAmelCase_ :str = max_position_embeddings
lowerCAmelCase_ :int = hidden_size
lowerCAmelCase_ :List[str] = intermediate_size
lowerCAmelCase_ :Tuple = num_hidden_layers
lowerCAmelCase_ :Dict = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase_ :List[str] = num_attention_heads
lowerCAmelCase_ :Any = num_key_value_heads
lowerCAmelCase_ :Optional[int] = hidden_act
lowerCAmelCase_ :Any = initializer_range
lowerCAmelCase_ :str = rms_norm_eps
lowerCAmelCase_ :List[Any] = pretraining_tp
lowerCAmelCase_ :int = use_cache
lowerCAmelCase_ :List[Any] = rope_scaling
self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
def __lowerCAmelCase ( self ) -> List[Any]:
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("""type""" , None )
        rope_scaling_factor = self.rope_scaling.get("""factor""" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
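        # Editorial note: a value that passes the validation above looks like
        #   rope_scaling={"type": "linear", "factor": 2.0}
        # i.e. exactly two fields, a type in {"linear", "dynamic"} and a float > 1.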
| 1 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
torch.manual_seed(0 )
        lowerCAmelCase_ :Tuple = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :List[Any] = CLIPTextModel(__A )
lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Union[str, Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _SCREAMING_SNAKE_CASE ( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components( self ):
torch.manual_seed(0 )
        lowerCAmelCase_ :Dict = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :str = CLIPTextModel(__A )
lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] )
lowerCAmelCase_ :List[Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
lowerCAmelCase_ :Union[str, Any] = 1_0.0
lowerCAmelCase_ :Union[str, Any] = 4
lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[str] = steps
lowerCAmelCase_ :int = scale
lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0]
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = steps
lowerCAmelCase_ :str = scale
lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = steps
lowerCAmelCase_ :Union[str, Any] = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[int] = steps
lowerCAmelCase_ :Tuple = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
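        # Editorial note on the four runs compared above: control_guidance_start /
        # control_guidance_end take a scalar (one window shared by all controlnets)
        # or a per-controlnet list; each ControlNet only contributes during its
        # fraction of the denoising steps, so every configuration should yield a
        # different image.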
def __lowerCAmelCase ( self ) -> Dict:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
        lowerCAmelCase_ :int = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ :List[Any] = """evil space-punk bird"""
lowerCAmelCase_ :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
lowerCAmelCase_ :int = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
lowerCAmelCase_ :Union[str, Any] = pipe(
__A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
lowerCAmelCase_ :Tuple = output.images[0]
assert image.shape == (512, 512, 3)
lowerCAmelCase_ :Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
| 1 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class UpperCAmelCase__ ( TaskTemplate ):
    """simple docstring"""
    task: str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.text_column: "text"}
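# Editorial note: task templates like this one are consumed by helpers such as
# Dataset.prepare_for_task, which use column_mapping to rename the configured
# text column to the canonical "text" feature before casting to input_schema.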
| 62 |
from importlib import import_module
from .logging import get_logger
__lowerCAmelCase : str =get_logger(__name__)
class _PatchedModuleObj :
    '''simple docstring'''
    def __init__( self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('''__''' ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class _lowercase :
'''simple docstring'''
    _active_patches = []
    def __init__( self , obj , target , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('''.''' )[0]
        self.original = {}
        self.attrs = attrs or []
def __enter__( self :int ) -> Dict:
        *submodules , target_attr = self.target.split('''.''' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowerCAmelCase__ ) ):
try:
__SCREAMING_SNAKE_CASE : Any = import_module('''.'''.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
__SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.obj , lowerCAmelCase__ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowerCAmelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
__SCREAMING_SNAKE_CASE : int = obj_attr
# patch at top level
setattr(self.obj , lowerCAmelCase__ , _PatchedModuleObj(lowerCAmelCase__ , attrs=self.attrs ) )
__SCREAMING_SNAKE_CASE : List[str] = getattr(self.obj , lowerCAmelCase__ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowerCAmelCase__ , lowerCAmelCase__ , _PatchedModuleObj(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , attrs=self.attrs ) )
__SCREAMING_SNAKE_CASE : Tuple = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
# finally set the target attribute
setattr(lowerCAmelCase__ , lowerCAmelCase__ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
__SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(import_module('''.'''.join(lowerCAmelCase__ ) ) , lowerCAmelCase__ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowerCAmelCase__ ) is attr_value:
__SCREAMING_SNAKE_CASE : Any = getattr(self.obj , lowerCAmelCase__ )
setattr(self.obj , lowerCAmelCase__ , self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
__SCREAMING_SNAKE_CASE : Union[str, Any] = globals()['''__builtins__'''][target_attr]
setattr(self.obj , lowerCAmelCase__ , self.new )
else:
raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self :str , *lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
for attr in list(self.original ):
setattr(self.obj , lowerCAmelCase__ , self.original.pop(lowerCAmelCase__ ) )
    def start( self ):
self.__enter__()
self._active_patches.append(self )
    def stop( self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
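# Editorial usage sketch (mirroring how the datasets library uses this patcher;
# the module and target below are illustrative):
#
#   import some_module
#   with _lowercase(some_module, "os.path.join", lambda *parts: "/".join(parts)):
#       ...  # code in some_module now sees the patched join
#
# start()/stop() give the same effect without a with-block and stack patches
# via _active_patches.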
| 9 | 0 |
import datasets
from .evaluate import evaluate
lowerCamelCase__ : Dict = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
lowerCamelCase__ : List[str] = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
lowerCamelCase__ : List[str] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 210 |
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    '''simple docstring'''
    def __init__( self ):
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor( self ):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start( self ):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()
    def stop( self ):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure() -> Tuple:
    # Time
    measures = {'time': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures ) -> Optional[Any]:
    # Time
    measures = {'time': time.time() - start_measures['time']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (deltas and peak, converted from bytes to MiB)
    measures['cpu'] = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
    measures['cpu-peak'] = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
    return measures
def log_measures(measures , description ) -> Optional[Any]:
    print(f"{description}:" )
    print(f"- Time: {measures['time']:.2f}s" )
    for i in range(torch.cuda.device_count() ):
        print(f"- GPU {i} allocated: {measures[str(i )]:.2f}MiB" )
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB" )
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
| 210 | 1 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_A : Optional[int] = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 202 |
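# A minimal sketch calling the underlying scorer directly; compute_bleu is the nmt_bleu
# function imported above and is assumed to return a 6-tuple as unpacked in _compute:
hypotheses = [["the", "cat", "sat", "down"]]
references = [[["the", "cat", "sat", "down"]]]
bleu, precisions, bp, ratio, trans_len, ref_len = compute_bleu(
    reference_corpus=references, translation_corpus=hypotheses, max_order=4, smooth=False
)
print(bleu)  # 1.0 here, since the four-token hypothesis matches its reference exactly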
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1_000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 202 | 1 |
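# Outside the tests, the processor is typically driven like this (a sketch, assuming the
# checkpoint can be downloaded; the text and the random waveform are illustrative):
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
waveform = np.random.randn(48_000)  # one second of fake audio at the 48 kHz rate CLAP expects
inputs = processor(text=["a dog barking"], audios=waveform, sampling_rate=48_000, return_tensors="pt")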
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = 'openai-gpt'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 369 |
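# A short sketch of how attribute_map resolves the generic names, assuming standard
# PretrainedConfig semantics:
config = OpenAIGPTConfig(n_embd=512, n_layer=6)
print(config.hidden_size)        # 512 -- resolved through attribute_map to n_embd
print(config.num_hidden_layers)  # 6   -- resolved to n_layer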
def and_gate(input_1: int, input_2: int) -> int:
    """
    Return 1 only if both inputs are 1 (logical AND).

    >>> and_gate(0, 1)
    0
    >>> and_gate(1, 1)
    1
    """
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the AND truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 151 | 0 |
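# The tuple-counting trick generalizes to other gates; an OR gate, for instance, only
# needs the complementary test (a sketch in the same style, not part of the original file):
def or_gate(input_1: int, input_2: int) -> int:
    # OR is false only when both inputs are 0
    return int((input_1, input_2).count(0) < 2)

assert or_gate(0, 0) == 0 and or_gate(0, 1) == 1 and or_gate(1, 1) == 1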
"""simple docstring"""
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the highest set bit of a non-negative integer.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 |
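# For non-negative integers the loop above computes exactly what Python exposes as
# int.bit_length(), which gives a convenient cross-check (a sketch):
for n in (0, 1, 25, 2**20):
    assert get_highest_set_bit_position(n) == n.bit_length()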
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_squeezebert_fast'''] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_squeezebert'''] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 283 | 0 |
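# The _LazyModule pattern keeps `import transformers.models.squeezebert` cheap: nothing
# below the TYPE_CHECKING guard is actually imported until an attribute is first touched.
# A sketch of the behaviour it enables (assuming transformers is installed):
from transformers.models.squeezebert import SqueezeBertConfig  # the heavy import happens only here

config = SqueezeBertConfig()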
def permute(nums: list) -> list:
    """Return all permutations of `nums`, built recursively."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list) -> list:
    """Return all permutations of `nums` via in-place backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
res = permute2([1, 2, 3])
print(res)
doctest.testmod()
| 224 |
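# Both implementations can be sanity-checked against the standard library (a sketch;
# fresh copies are passed because `permute` mutates and then restores its input):
from itertools import permutations as it_permutations

nums = [1, 2, 3]
expected = sorted(it_permutations(nums))
assert sorted(tuple(p) for p in permute(list(nums))) == expected
assert sorted(tuple(p) for p in permute2(list(nums))) == expected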
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = """transfo-xl"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""",
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 224 | 1 |
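# A short sketch of how the adaptive-softmax fields above interact (plain instantiation,
# no model weights involved):
config = TransfoXLConfig(cutoffs=[20000, 40000, 200000], proj_share_all_but_first=True)
print(config.cutoffs)    # [20000, 40000, 200000]
print(config.tie_projs)  # [False, True, True, True] -- every cluster but the first shares its projection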