| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
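# Demo sketch (hypothetical usage; assumes the `transformers` package is
# installed): _LazyModule registers the import structure without importing any
# torch-backed code, so importing the subpackage stays cheap and the heavy
# modeling module is only resolved on first attribute access.
if __name__ == "__main__":
    from transformers.models import focalnet

    print(focalnet.FocalNetConfig())  # attribute access triggers the lazy import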
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    """Configuration class for the Decision Transformer model."""

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
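# Demo sketch (hypothetical values): `attribute_map` aliases the generic
# Transformers attribute names onto the GPT-2-style names used internally.
if __name__ == "__main__":
    demo_config = DecisionTransformerConfig(state_dim=11, act_dim=3)
    assert demo_config.num_hidden_layers == demo_config.n_layer
    assert demo_config.max_position_embeddings == demo_config.n_positions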
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in non-decreasing order, by trial division.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
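# Demo sketch: trial division only needs divisors up to sqrt(n); whatever is
# left over at the end is itself prime. Sanity-check that the factors multiply
# back to the input on a few random values.
if __name__ == "__main__":
    import math
    import random

    for _ in range(100):
        value = random.randint(2, 10**6)
        assert math.prod(prime_factors(value)) == value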
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler


try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )


try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler


try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
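# Demo sketch (assumes `torch` and the surrounding diffusers package are
# importable): instantiate the 1-D UNet with its defaults and count parameters.
# `forward` expects a (batch, channels, length) tensor plus a timestep; the
# valid sequence lengths depend on the chosen down/up block configuration.
if __name__ == "__main__":
    model = UNet1DModel()
    print(sum(p.numel() for p in model.parameters()), "parameters")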
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile

import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def fromfile(cls, file, **kwargs):
        config_dict, kwargs = cls.get_config_dict(file, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path


def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
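# Demo sketch (hypothetical URL/ETag values): the cache filename is the sha256
# of the URL, optionally suffixed with the sha256 of the ETag, so every
# (url, revision) pair maps to a distinct, filesystem-safe name.
if __name__ == "__main__":
    print(url_to_filename("https://example.com/model.bin", etag='"abc123"'))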
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path


def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data


def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        v = torch.from_numpy(v)
        if "running_var" in k:
            # add the matching num_batches_tracked entry expected by BatchNorm
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
        new[k] = v
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx, left_element, right_element, a, b, val):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx, left_element, right_element, a, b):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
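# Demo sketch: brute-force check of the lazy range-assignment updates above.
# After assigning 111 to positions 1..3 and 235 to positions 7..8, the
# range-max over the whole array must agree with a plain max() on a list.
if __name__ == "__main__":
    reference = list(A)
    reference[0:3] = [111] * 3  # update(1, 3, 111)
    reference[6:8] = [235] * 2  # update(7, 8, 235)
    assert segt.query(1, 1, size, 1, size) == max(reference)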
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_from_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Parse the embedded JSON profile data out of an Instagram page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
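# Demo sketch (assumes `transformers` with ONNX extras is installed; exact
# OnnxConfig constructor signatures vary by version): the dynamic axes above
# tell the ONNX exporter which input dimensions may vary at runtime.
if __name__ == "__main__":
    onnx_config = AlbertOnnxConfig(AlbertConfig(), task="default")
    print(onnx_config.inputs)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])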
"""Image processor class for CLIP."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCamelCase : List[str] = [convert_to_rgb(SCREAMING_SNAKE_CASE_ ) for image in images]
# All transformations expect numpy arrays.
_lowerCamelCase : Optional[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
_lowerCamelCase : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE_,size=SCREAMING_SNAKE_CASE_,resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
_lowerCamelCase : List[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE_,size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
_lowerCamelCase : List[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE_,scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
_lowerCamelCase : List[Any] = [self.normalize(image=SCREAMING_SNAKE_CASE_,mean=SCREAMING_SNAKE_CASE_,std=SCREAMING_SNAKE_CASE_ ) for image in images]
_lowerCamelCase : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_,SCREAMING_SNAKE_CASE_ ) for image in images]
_lowerCamelCase : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_,tensor_type=SCREAMING_SNAKE_CASE_ ) | 44 |
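# Hedged sketch of the numeric core of the preprocess pipeline above: rescale
# then normalize. The mean/std values are assumed to be the usual
# OPENAI_CLIP_MEAN / OPENAI_CLIP_STD constants; resizing and cropping are
# omitted for brevity.
import numpy as np

dummy_image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
pixel_values = dummy_image * (1 / 255)                     # do_rescale step
clip_mean = np.array([0.48145466, 0.4578275, 0.40821073])  # assumed mean
clip_std = np.array([0.26862954, 0.26130258, 0.27577711])  # assumed std
pixel_values = (pixel_values - clip_mean) / clip_std       # do_normalize step
print(pixel_values.shape)  # (224, 224, 3)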
from __future__ import annotations
def lowercase ( __A : list ) -> float:
'''simple docstring'''
    if not __A:
raise ValueError("""List is empty""" )
return sum(__A ) / len(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
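# Usage of the mean helper above:
#   lowercase([1, 2, 3, 4]) -> 2.5
#   lowercase([])           -> raises ValueError("List is empty")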
| 36 | 0 |
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 400_0000 ):
"""simple docstring"""
    even_fibs : list = []
    a , b = 0, 1
    while b <= _UpperCamelCase:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 620 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowercase : List[str] = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Tuple = '''vision-encoder-decoder'''
__lowerCamelCase : List[Any] = True
def __init__( self ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"""A configuraton of type {self.model_type} cannot be instantiated because """
F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
snake_case : Union[str, Any] = kwargs.pop("""encoder""" )
snake_case : Any = encoder_config.pop("""model_type""" )
snake_case : Optional[Any] = kwargs.pop("""decoder""" )
snake_case : Union[str, Any] = decoder_config.pop("""model_type""" )
snake_case : Any = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : int = True
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
snake_case : Tuple = True
snake_case : Union[str, Any] = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = copy.deepcopy(self.__dict__ )
snake_case : Union[str, Any] = self.encoder.to_dict()
snake_case : Union[str, Any] = self.decoder.to_dict()
snake_case : Dict = self.__class__.model_type
return output
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case_ ( self ):
'''simple docstring'''
return 1E-4
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = OrderedDict()
snake_case : Optional[int] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
snake_case : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
snake_case : Optional[Any] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = None ,):
'''simple docstring'''
import torch
snake_case : Optional[Any] = OrderedDict()
snake_case : Tuple = super().generate_dummy_inputs(
SCREAMING_SNAKE_CASE_ ,batch_size=SCREAMING_SNAKE_CASE_ ,seq_length=SCREAMING_SNAKE_CASE_ ,is_pair=SCREAMING_SNAKE_CASE_ ,framework=SCREAMING_SNAKE_CASE_ )
snake_case , snake_case : List[Any] = dummy_input["""input_ids"""].shape
snake_case : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
snake_case : List[str] = dummy_input.pop("""input_ids""" )
snake_case : int = dummy_input.pop("""attention_mask""" )
snake_case : Dict = torch.zeros(SCREAMING_SNAKE_CASE_ )
return common_inputs
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "default" ):
'''simple docstring'''
snake_case : int = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
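# Public-API sketch of the composition pattern above (the sub-config classes
# are illustrative; any registered encoder/decoder model types would work):
# from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig
# config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
# assert config.decoder.is_decoder and config.decoder.add_cross_attention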
| 36 | 0 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : str = []
A_ : Optional[int] = []
A_ : str = []
for rt in rc.restypes:
A_ : List[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
A_ : List[str] = {name: i for i, name in enumerate(__A )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
A_ : List[Any] = torch.tensor(
__A , dtype=torch.intaa , device=protein['''aatype'''].device , )
A_ : int = torch.tensor(
__A , dtype=torch.intaa , device=protein['''aatype'''].device , )
A_ : Tuple = torch.tensor(
__A , dtype=torch.floataa , device=protein['''aatype'''].device , )
A_ : List[str] = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
A_ : Any = restype_atomaa_to_atomaa[protein_aatype]
A_ : Dict = restype_atomaa_mask[protein_aatype]
A_ : str = residx_atomaa_mask
A_ : Optional[int] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
A_ : Tuple = restype_atomaa_to_atomaa[protein_aatype]
A_ : Optional[Any] = residx_atomaa_to_atomaa.long()
# create the corresponding mask
A_ : int = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
A_ : Optional[Any] = rc.restype_atoa[restype_letter]
A_ : Union[str, Any] = rc.residue_atoms[restype_name]
for atom_name in atom_names:
A_ : int = rc.atom_order[atom_name]
A_ : Union[str, Any] = 1
A_ : Any = restype_atomaa_mask[protein_aatype]
A_ : Union[str, Any] = residx_atomaa_mask
return protein
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : Tuple = tree_map(lambda SCREAMING_SNAKE_CASE : torch.tensor(__A , device=batch['''aatype'''].device ) , __A , np.ndarray )
A_ : int = tensor_tree_map(lambda SCREAMING_SNAKE_CASE : np.array(__A ) , make_atomaa_masks(__A ) )
return out
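# Minimal torch sketch of the index-map idea above: per-residue atom indices
# let a single gather convert between the dense 14-atom and padded 37-atom
# layouts. All shapes and values here are dummies.
import torch

num_res = 5
residx_atom14_to_atom37 = torch.randint(0, 37, (num_res, 14))  # index map
atom37_positions = torch.randn(num_res, 37, 3)                 # dummy coords
atom14_positions = torch.gather(
    atom37_positions, 1, residx_atom14_to_atom37[..., None].expand(-1, -1, 3)
)
print(atom14_positions.shape)  # torch.Size([5, 14, 3])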
| 590 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Any = logging.get_logger(__name__)
def lowercase ( __A : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Dict = """huggingface/label-files"""
snake_case : int = """imagenet-1k-id2label.json"""
snake_case : Tuple = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
snake_case : Any = {int(__A ): v for k, v in idalabel.items()}
snake_case : Dict = {v: k for k, v in idalabel.items()}
snake_case : Any = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
snake_case : List[Any] = BitConfig(
conv_layer=__A , num_labels=1000 , idalabel=__A , labelaid=__A , )
return config
def lowercase ( __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "stem.conv" in name:
snake_case : List[str] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
snake_case : List[str] = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
snake_case : Optional[int] = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
snake_case : Optional[Any] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
snake_case : Tuple = """bit.encoder.""" + name
return name
def lowercase ( ) -> Optional[int]:
'''simple docstring'''
snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case : Optional[Any] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowercase ( __A : Any , __A : Union[str, Any] , __A : str=False ) -> Optional[int]:
'''simple docstring'''
snake_case : str = get_config(__A )
# load original model from timm
snake_case : Tuple = create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model
snake_case : List[str] = timm_model.state_dict()
for key in state_dict.copy().keys():
snake_case : List[Any] = state_dict.pop(__A )
snake_case : Union[str, Any] = val.squeeze() if """head""" in key else val
# load HuggingFace model
snake_case : List[Any] = BitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# create image processor
snake_case : Dict = create_transform(**resolve_data_config({} , model=__A ) )
snake_case : Optional[Any] = transform.transforms
snake_case : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case : Union[str, Any] = BitImageProcessor(
do_resize=__A , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__A , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case : Dict = prepare_img()
snake_case : List[str] = transform(__A ).unsqueeze(0 )
snake_case : int = processor(__A , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__A , __A )
# verify logits
with torch.no_grad():
snake_case : Optional[int] = model(__A )
snake_case : Dict = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
snake_case : int = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
__lowercase : Union[str, Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
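# Spot-check of the key-renaming rules above:
#   "stem.conv.weight" -> "bit.embedder.convolution.weight"
#   "head.fc.bias"     -> "classifier.1.bias"
#   "norm.weight"      -> "bit.norm.weight"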
| 36 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__a : str = logging.get_logger(__name__)
__a : Dict = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class UpperCAmelCase( snake_case_ , snake_case_ ):
"""simple docstring"""
a : Optional[int] = '''convnextv2'''
def __init__( self , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="gelu" , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=0.0 , lowerCamelCase=224 , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase , ) -> List[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase__ : Dict = num_channels
lowercase__ : List[str] = patch_size
lowercase__ : List[Any] = num_stages
lowercase__ : Union[str, Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
lowercase__ : Optional[Any] = [3, 3, 9, 3] if depths is None else depths
lowercase__ : List[Any] = hidden_act
lowercase__ : Optional[Any] = initializer_range
lowercase__ : str = layer_norm_eps
lowercase__ : str = drop_path_rate
lowercase__ : Any = image_size
lowercase__ : Tuple = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
lowercase__ : str = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names ) | 397 |
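# Illustrative check of the stage naming above with the default depths:
depths = [3, 3, 9, 3]
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
print(stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']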
import os
import pytest
from attr import dataclass
__lowercase : Optional[int] = '''us-east-1''' # defaults region
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : Dict = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
__lowerCamelCase : Optional[Any] = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
__lowerCamelCase : List[str] = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""{self.framework}-transfromers-test"""
@property
def snake_case_ ( self ):
'''simple docstring'''
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
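# How the PyTorch metric regexes above match a training log line (the log
# format here is illustrative):
import re

log_line = "eval_accuracy = 0.8456"
match = re.search(r"eval_accuracy.*=\D*(.*?)$", log_line)
print(match.group(1))  # "0.8456"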
| 36 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case__ :
@staticmethod
def A ( *_A : str , **_A : List[Any] ) -> Dict:
pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase):
a_ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A ( self : Union[str, Any] , _A : List[Any] , _A : Optional[int] , _A : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : str = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
UpperCAmelCase_ : List[Any] = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
def A ( self : Optional[Any] , _A : Tuple , _A : List[Any] ) -> Tuple:
UpperCAmelCase_ : List[str] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase_ : Dict = len(SCREAMING_SNAKE_CASE_ )
self.assertGreater(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
{
'''score''': ANY(SCREAMING_SNAKE_CASE_ ),
'''label''': ANY(SCREAMING_SNAKE_CASE_ ),
'''box''': {'''xmin''': ANY(SCREAMING_SNAKE_CASE_ ), '''ymin''': ANY(SCREAMING_SNAKE_CASE_ ), '''xmax''': ANY(SCREAMING_SNAKE_CASE_ ), '''ymax''': ANY(SCREAMING_SNAKE_CASE_ )},
}
for i in range(SCREAMING_SNAKE_CASE_ )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def A ( self : str ) -> Dict:
pass
@require_torch
def A ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase_ : str = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
UpperCAmelCase_ : Union[str, Any] = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
] , )
UpperCAmelCase_ : Optional[int] = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
]
] , )
@require_torch
@slow
def A ( self : str ) -> List[Any]:
UpperCAmelCase_ : List[str] = pipeline('''zero-shot-object-detection''' )
UpperCAmelCase_ : Dict = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
] , )
UpperCAmelCase_ : int = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
],
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def A ( self : Any ) -> Tuple:
pass
@require_torch
@slow
def A ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : int = 0.2
UpperCAmelCase_ : Dict = pipeline('''zero-shot-object-detection''' )
UpperCAmelCase_ : int = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=SCREAMING_SNAKE_CASE_ , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
] , )
@require_torch
@slow
def A ( self : Tuple ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Dict = pipeline('''zero-shot-object-detection''' )
UpperCAmelCase_ : str = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=SCREAMING_SNAKE_CASE_ , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
] , )
| 541 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
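# Note: with rich present, `install(show_locals=False)` replaces the default
# traceback handler, so uncaught exceptions render as rich tracebacks without
# dumping local variables into the output.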
| 36 | 0 |
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
"""simple docstring"""
assert isinstance(__A , __A )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any ):
"""simple docstring"""
a_ : str = tmp_path / """cache"""
a_ : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a_ : Any = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__A , keep_in_memory=__A ).read()
_check_sql_dataset(__A , __A )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ):
"""simple docstring"""
a_ : Optional[int] = tmp_path / """cache"""
a_ : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
a_ : List[Any] = features.copy() if features else default_expected_features
a_ : Any = (
Features({feature: Value(__A ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=__A , cache_dir=__A ).read()
_check_sql_dataset(__A , __A )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
with contextlib.closing(sqlitea.connect(__A ) ) as con:
a_ : int = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Dict ):
"""simple docstring"""
a_ : Tuple = tmp_path / """cache"""
a_ : List[Any] = os.path.join(__A , """tmp.sql""" )
a_ : Optional[int] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__A ).read()
SqlDatasetWriter(__A , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
a_ : Any = iter_sql_file(__A )
a_ : Tuple = iter_sql_file(__A )
for rowa, rowa in zip(__A , __A ):
assert rowa == rowa
@require_sqlalchemy
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
a_ : List[Any] = tmp_path / """cache"""
a_ : Tuple = os.path.join(__A , """tmp.sql""" )
a_ : Tuple = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__A ).read()
SqlDatasetWriter(__A , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
a_ : str = iter_sql_file(__A )
a_ : Optional[int] = iter_sql_file(__A )
for rowa, rowa in zip(__A , __A ):
assert rowa == rowa
@require_sqlalchemy
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
"""simple docstring"""
a_ : Optional[Any] = tmp_path / """cache"""
a_ : Dict = os.path.join(__A , """tmp.sql""" )
a_ : Optional[Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__A ).read()
with pytest.raises(__A ):
SqlDatasetWriter(__A , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
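# Condensed round-trip of the reader/writer pair exercised above (paths are
# illustrative):
# ds = SqlDatasetReader("dataset", "sqlite:///in.db", cache_dir="/tmp/cache").read()
# SqlDatasetWriter(ds, "dataset", "sqlite:///out.db", num_proc=1).write()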
| 442 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__lowercase : Any = logging.getLogger(__name__)
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : str
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : List[int]
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[Union[int, float]] = None
__lowerCamelCase : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : str = hans_processors[task]()
snake_case : str = os.path.join(
SCREAMING_SNAKE_CASE_ ,"""cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ,) ,)
snake_case : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : List[Any] = label_list[2], label_list[1]
snake_case : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case : Any = cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE_ ):
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
snake_case : int = torch.load(SCREAMING_SNAKE_CASE_ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
snake_case : Union[str, Any] = (
processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
)
logger.info("""Training examples: %s""" ,len(SCREAMING_SNAKE_CASE_ ) )
snake_case : Dict = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
logger.info("""Saving features into cached file %s""" ,SCREAMING_SNAKE_CASE_ )
torch.save(self.features ,SCREAMING_SNAKE_CASE_ )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _A :
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 128 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : Any = hans_processors[task]()
snake_case : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : int = label_list[2], label_list[1]
snake_case : List[str] = label_list
snake_case : int = processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
snake_case : Any = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(SCREAMING_SNAKE_CASE_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
snake_case : Any = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE_ ,(
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) ,(
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case_ ( self ):
'''simple docstring'''
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
class _A ( snake_case ):
'''simple docstring'''
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_train_set.txt""" ) ) ,"""train""" )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_evaluation_set.txt""" ) ) ,"""dev""" )
def snake_case_ ( self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = []
for i, line in enumerate(SCREAMING_SNAKE_CASE_ ):
if i == 0:
continue
snake_case : Any = """%s-%s""" % (set_type, line[0])
snake_case : Optional[int] = line[5]
snake_case : Union[str, Any] = line[6]
snake_case : Optional[Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
snake_case : Dict = line[0]
examples.append(InputExample(guid=SCREAMING_SNAKE_CASE_ ,text_a=SCREAMING_SNAKE_CASE_ ,text_b=SCREAMING_SNAKE_CASE_ ,label=SCREAMING_SNAKE_CASE_ ,pairID=SCREAMING_SNAKE_CASE_ ) )
return examples
def lowercase ( __A : List[InputExample] , __A : List[str] , __A : int , __A : PreTrainedTokenizer , ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = {label: i for i, label in enumerate(__A )}
snake_case : Union[str, Any] = []
for ex_index, example in tqdm.tqdm(enumerate(__A ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
snake_case : Union[str, Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=__A , max_length=__A , padding="""max_length""" , truncation=__A , return_overflowing_tokens=__A , )
snake_case : Tuple = label_map[example.label] if example.label in label_map else 0
snake_case : Tuple = int(example.pairID )
features.append(InputFeatures(**__A , label=__A , pairID=__A ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
__lowercase : Dict = {
'''hans''': 3,
}
__lowercase : Union[str, Any] = {
'''hans''': HansProcessor,
}
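# Effect of the RoBERTa-family label swap above (indices 1 and 2 trade places):
label_list = ["contradiction", "entailment", "neutral"]
label_list[1], label_list[2] = label_list[2], label_list[1]
print(label_list)  # ['contradiction', 'neutral', 'entailment']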
| 36 | 0 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__lowerCAmelCase = False
class lowerCamelCase_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase ( self ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
_UpperCamelCase = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 147 |
from __future__ import annotations
def lowercase ( __A : int ) -> list[int]:
'''simple docstring'''
    i : int = 2
    factors : list[int] = []
    while i * i <= __A:
        if __A % i:
            i += 1
        else:
            __A //= i
            factors.append(i )
    if __A > 1:
        factors.append(__A )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
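# Usage of the trial-division helper above:
#   lowercase(12) -> [2, 2, 3]
#   lowercase(97) -> [97]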
| 36 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _a ( UpperCamelCase__ ):
_lowercase : List[Any] = '''blenderbot-small'''
_lowercase : List[str] = ['''past_key_values''']
_lowercase : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self: int , UpperCamelCase_: Union[str, Any]=50_265 , UpperCamelCase_: Optional[int]=512 , UpperCamelCase_: List[str]=8 , UpperCamelCase_: Tuple=2_048 , UpperCamelCase_: Optional[int]=16 , UpperCamelCase_: List[Any]=8 , UpperCamelCase_: List[Any]=2_048 , UpperCamelCase_: Any=16 , UpperCamelCase_: int=0.0 , UpperCamelCase_: Optional[Any]=0.0 , UpperCamelCase_: str=True , UpperCamelCase_: str=True , UpperCamelCase_: List[Any]="gelu" , UpperCamelCase_: Any=512 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: Union[str, Any]=1 , UpperCamelCase_: int=False , UpperCamelCase_: Tuple=0 , UpperCamelCase_: str=1 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: Union[str, Any]=2 , **UpperCamelCase_: str , ) -> int:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , forced_eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
class _a ( UpperCamelCase__ ):
@property
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase__ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowercase__ = {0: """batch"""}
lowercase__ = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowercase__ = {0: """batch""", 1: """decoder_sequence"""}
lowercase__ = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase__ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowercase__ = self.num_layers
for i in range(SCREAMING_SNAKE_CASE_ ):
lowercase__ = {0: """batch""", 2: """past_sequence + sequence"""}
lowercase__ = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowercase__ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def lowerCamelCase_ ( self: Dict ) -> str:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase__ = super().outputs
else:
lowercase__ = super(SCREAMING_SNAKE_CASE_ , self ).outputs
if self.use_past:
lowercase__ = self.num_layers
for i in range(SCREAMING_SNAKE_CASE_ ):
lowercase__ = {0: """batch""", 2: """past_sequence + sequence"""}
lowercase__ = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def lowerCamelCase_ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Any = -1 , UpperCamelCase_: Tuple = -1 , UpperCamelCase_: List[Any] = False , UpperCamelCase_: str = None , ) -> Dict:
"""simple docstring"""
lowercase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Generate decoder inputs
lowercase__ = seq_length if not self.use_past else 1
lowercase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase__ = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
lowercase__ = dict(**SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase__ = common_inputs["""input_ids"""].shape
lowercase__ = common_inputs["""decoder_input_ids"""].shape[1]
lowercase__ = self.num_attention_heads
lowercase__ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase__ = decoder_seq_length + 3
lowercase__ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase__ = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] , dim=1 )
lowercase__ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowercase__ = self.num_layers
lowercase__ = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase__ = max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) - min_num_layers
lowercase__ = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(SCREAMING_SNAKE_CASE_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(SCREAMING_SNAKE_CASE_ ),
torch.zeros(SCREAMING_SNAKE_CASE_ ),
torch.zeros(SCREAMING_SNAKE_CASE_ ),
torch.zeros(SCREAMING_SNAKE_CASE_ ),
) )
# TODO: test this.
lowercase__ = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) )
return common_inputs
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[Any] = -1 , UpperCamelCase_: Dict = -1 , UpperCamelCase_: Any = False , UpperCamelCase_: Tuple = None , ) -> Dict:
"""simple docstring"""
lowercase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase__ = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowercase__ = seqlen + 2
lowercase__ = self.num_layers
lowercase__ = self.num_attention_heads
lowercase__ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase__ = common_inputs["""attention_mask"""].dtype
lowercase__ = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )] , dim=1 )
lowercase__ = [
(torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(SCREAMING_SNAKE_CASE_ )
]
return common_inputs
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str] = -1 , UpperCamelCase_: Optional[int] = -1 , UpperCamelCase_: List[str] = False , UpperCamelCase_: Tuple = None , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase__ = tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE_ )
lowercase__ = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE_ )
# Generate dummy inputs according to compute batch and sequence
lowercase__ = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowercase__ = dict(tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) )
return common_inputs
def lowerCamelCase_ ( self: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict = -1 , UpperCamelCase_: Union[str, Any] = -1 , UpperCamelCase_: List[str] = False , UpperCamelCase_: Optional[Any] = None , ) -> str:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
elif self.task == "causal-lm":
lowercase__ = self._generate_dummy_inputs_for_causal_lm(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
else:
lowercase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
return common_inputs
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: Dict , UpperCamelCase_: Dict ) -> Optional[int]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase__ = super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
lowercase__ = super(SCREAMING_SNAKE_CASE_ , self )._flatten_past_key_values_(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
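# Shape sketch for one `past_key_values` entry assembled above, using the
# config defaults (d_model=512, 16 decoder heads) and an illustrative batch:
import torch

batch, seq_length = 2, 7
num_heads, head_dim = 16, 512 // 16
decoder_past_length = seq_length + 3  # as computed above
decoder_shape = (batch, num_heads, decoder_past_length, head_dim)
key, value = torch.zeros(decoder_shape), torch.zeros(decoder_shape)
print(key.shape)  # torch.Size([2, 16, 10, 32])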
| 43 |
import numpy as np
def lowercase ( __A : np.array ) -> np.array:
'''simple docstring'''
    return (2 / (1 + np.exp(-2 * __A ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
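# Identity check: 2 / (1 + e^{-2x}) - 1 == tanh(x), so the helper above agrees
# with np.tanh, e.g.
#   np.allclose(lowercase(np.array([-1.0, 0.0, 1.0])), np.tanh([-1.0, 0.0, 1.0]))  # True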
| 36 | 0 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 533 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    """Map a TF Pegasus weight name onto the corresponding Hugging Face state-dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load the converted TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000") -> dict:
    """Read every variable from a TF checkpoint into a plain {name: numpy array} dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    """Convert a TF Pegasus checkpoint and its tokenizer, saving both under save_dir."""
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
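# Example invocation (hypothetical checkpoint path; the parent directory name, e.g.
# "aeslc", selects the task-specific config):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc
#
# or equivalently from Python:
#   convert_pegasus_ckpt_to_pytorch("./ckpt/aeslc/model.ckpt-32000", "pegasus/aeslc")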
| 36 | 0 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    # flag names restored from the common test mixin conventions
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 242 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
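# Example invocation with hypothetical paths (the script filename is illustrative):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/longformer_qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa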
| 36 | 0 |
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
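# Cross-check (added; not part of the original solution): both sums have closed forms,
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6, so the loop can be verified directly.
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares


assert solution(100) == solution_closed_form(100) == 25164150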
| 563 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__A , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__A , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    """Token-level F1 between a gold answer and a prediction."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
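# Worked example (added; not part of the official script): normalize_answer strips
# articles, so "the cat sat" matches "cat sat" exactly; for "blue house" vs "house",
# precision is 1/1 and recall is 1/2, giving F1 = 2/3.
assert compute_exact("the cat sat", "cat sat") == 1
assert abs(compute_f1("blue house", "house") - 2 / 3) < 1e-9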
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 36 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs=None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,) | 44 |
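# Usage sketch (added for illustration; `from_pretrained` downloads the checkpoint's
# spm_char.model, so network access is assumed):
#
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#   ids = tokenizer("hello world").input_ids  # character-level pieces plus a trailing </s>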
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    r"""Constructs a MobileViT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format=None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Union[str, TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Convert model logits into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 36 | 0 |
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """
    Count positions at which two equal-length strings differ.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 620 |
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model (and its tokenizer) using a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
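# Example (identifiers hypothetical; `fire.Fire` exposes the function as a CLI, mapping
# positional and key=value arguments onto its parameters):
#
#   python save_randomly_initialized_model.py t5-small ./t5-small-random d_model=64
#
# which is equivalent to calling:
#   save_randomly_initialized_version("t5-small", "./t5-small-random", d_model=64)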
| 36 | 0 |
from __future__ import annotations
seive = [True] * 100_0001
i = 2
while i * i <= 100_0000:
    if seive[i]:
        for j in range(i * i, 100_0001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Return True if n is prime (n must lie within the sieve bounds)."""
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    """Return True if n contains an even digit."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """Return the circular primes below the limit: every digit rotation must be prime."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Return the count of circular primes below one million."""
    return len(find_circular_primes())
if __name__ == "__main__":
print(F'''{len(find_circular_primes()) = }''')
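# Quick check (added for illustration): 197 is the classic circular prime, since
# 197, 971 and 719 are all prime and no digit is even.
assert is_prime(197) and is_prime(971) and is_prime(719)
assert not contains_an_even_digit(197)
assert 197 in find_circular_primes()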
| 590 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    r"""Configuration class for `MobileNetV1` models."""

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
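# Usage sketch (added for illustration; assumes the module's relative imports resolve,
# i.e. this runs from inside the transformers package):
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   onnx_config = MobileNetV1OnnxConfig(config, task="image-classification")
#   list(onnx_config.inputs)   # -> ["pixel_values"]
#   list(onnx_config.outputs)  # -> ["logits"]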
| 36 | 0 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    """Configuration class for `Nezha` models."""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02,
                 layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3,
                 use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache | 397 |
| 36 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self) -> None:
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self) -> None:
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # black target version restored from the garbled original ("PYaa")
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            F"{long_class_name}SchedulerOutput",
            re.sub("DDPM", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 541 |
| 36 | 0 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the smallest repunit (1, 11, 111, ...) divisible by divisor."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 100_0000) -> int:
    """Return the least odd divisor whose smallest divisible repunit is longer than the limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"{solution() = }")
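# Worked example (added): the first repunit divisible by 7 is R(6) = 111111 = 7 * 15873,
# so least_divisible_repunit(7) returns 6.
assert 111_111 % 7 == 0
assert least_divisible_repunit(7) == 6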
| 442 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : List[str] ) -> Any:
'''simple docstring'''
snake_case : Tuple = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 36 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        # Most recent comment first.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 147 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 36 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted-letter signature; all anagrams of a word share it."""
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    """Return every word in the list that is an anagram of `my_word`."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
| 43 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
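# Thin deprecation shim: the SageMaker-specific behavior now lives in the regular Trainer.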
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 36 | 0 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` on [a, b] by repeatedly halving the bracketing interval."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                # the sign change (and hence the root) lies in [start, mid]
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    print(bisection(f, 1, 1000))
    import doctest
    doctest.testmod()
| 533 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def output_types(outputs: List):
    output_types = []
for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)
    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)
    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))
    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))
    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 36 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_lowerCAmelCase : Union[str, Any] = '''true'''
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return a model, a DDP-prepared copy of it, and a dataloader for basic tests."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    dataset = load_dataset('glue', 'mrpc', split='validation')
    def tokenize_function(examples):
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='longest', return_tensors='pt')
        return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        # Run inference batch by batch, gathering predictions across processes.
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load('glue', 'mrpc')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['labels'])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 242 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
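# Every test below downloads from the Hugging Face Hub, hence the module-wide integration marker.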
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_infos(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 36 | 0 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
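# Shared tester mixin for the DeepFloyd IF pipelines; concrete test classes are expected to
# provide `pipeline_class` and `get_dummy_inputs`.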
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )
        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 563 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"
    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 36 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 44 |
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Optimize `search_prob` by simulated annealing, returning the best state found."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )
    def test_f2(x, y):
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        f"{local_min.score()}"
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        f"{local_max.score()}"
    )
| 620 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass
    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)
    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 36 | 0 |
def is_palindrome(n) -> bool:
    return str(n) == str(n)[::-1]
def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])
def solution(limit: int = 10_000) -> int:
    """Count Lychrel candidates below `limit`: numbers that never reach a palindrome in 50 reverse-and-add steps."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            lychrel_nums.append(a)
    return len(lychrel_nums)
if __name__ == "__main__":
    print(f"{solution() = }")
| 590 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id, )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 36 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
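# Each constant below is a Python source snippet; get_imports should report only the unconditional
# "os" dependency and skip imports guarded by try/except ImportError blocks.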
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" ,__A )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowercase__ : List[Any] = os.path.join(__A ,"test_file.py" )
with open(__A ,"w" ) as _tmp_file:
_tmp_file.write(__A )
lowercase__ : Dict = get_imports(__A )
assert parsed_imports == ["os"] | 397 |
import os
import pytest
from attr import dataclass
__lowercase : Optional[int] = '''us-east-1''' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
    def metric_definitions(self) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"
@property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"
@property
    def image_uri(self) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 36 | 0 |
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 if both inputs are 1, else 0 (logical AND)."""
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 541 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 36 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
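# Stochastic sampler from Karras et al. (2022), "Elucidating the Design Space of Diffusion-Based Generative Models".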
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2
    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
| 442 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
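# Utilities for the HANS (Heuristic Analysis for NLI Systems) diagnostic dataset.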
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : str = hans_processors[task]()
snake_case : str = os.path.join(
SCREAMING_SNAKE_CASE_ ,"""cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ,) ,)
snake_case : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : List[Any] = label_list[2], label_list[1]
snake_case : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case : Any = cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE_ ):
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
snake_case : int = torch.load(SCREAMING_SNAKE_CASE_ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
snake_case : Union[str, Any] = (
processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
)
logger.info("""Training examples: %s""" ,len(SCREAMING_SNAKE_CASE_ ) )
snake_case : Dict = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
logger.info("""Saving features into cached file %s""" ,SCREAMING_SNAKE_CASE_ )
torch.save(self.features ,SCREAMING_SNAKE_CASE_ )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _A :
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 128 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : Any = hans_processors[task]()
snake_case : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : int = label_list[2], label_list[1]
snake_case : List[str] = label_list
snake_case : int = processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
snake_case : Any = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(SCREAMING_SNAKE_CASE_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
snake_case : Any = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE_ ,(
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) ,(
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case_ ( self ):
'''simple docstring'''
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
class _A ( snake_case ):
'''simple docstring'''
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_train_set.txt""" ) ) ,"""train""" )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_evaluation_set.txt""" ) ) ,"""dev""" )
def snake_case_ ( self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = []
for i, line in enumerate(SCREAMING_SNAKE_CASE_ ):
if i == 0:
continue
snake_case : Any = """%s-%s""" % (set_type, line[0])
snake_case : Optional[int] = line[5]
snake_case : Union[str, Any] = line[6]
snake_case : Optional[Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
snake_case : Dict = line[0]
examples.append(InputExample(guid=SCREAMING_SNAKE_CASE_ ,text_a=SCREAMING_SNAKE_CASE_ ,text_b=SCREAMING_SNAKE_CASE_ ,label=SCREAMING_SNAKE_CASE_ ,pairID=SCREAMING_SNAKE_CASE_ ) )
return examples
def lowercase ( __A : List[InputExample] , __A : List[str] , __A : int , __A : PreTrainedTokenizer , ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = {label: i for i, label in enumerate(__A )}
snake_case : Union[str, Any] = []
for ex_index, example in tqdm.tqdm(enumerate(__A ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
snake_case : Union[str, Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=__A , max_length=__A , padding="""max_length""" , truncation=__A , return_overflowing_tokens=__A , )
snake_case : Tuple = label_map[example.label] if example.label in label_map else 0
snake_case : Tuple = int(example.pairID )
features.append(InputFeatures(**__A , label=__A , pairID=__A ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
__lowercase : Dict = {
'''hans''': 3,
}
__lowercase : Union[str, Any] = {
'''hans''': HansProcessor,
}
| 36 | 0 |
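# Hedged sketch of the RoBERTa label-swap performed in both dataset classes
# above. Assumption: HANS reuses the MNLI label order, and RoBERTa-family
# checkpoints were trained with "entailment" and "neutral" in swapped positions.
label_list = ["contradiction", "entailment", "neutral"]
is_roberta_family = True  # stand-in for the tokenizer.__class__ membership test above
if is_roberta_family:
    label_list[1], label_list[2] = label_list[2], label_list[1]
label_map = {label: i for i, label in enumerate(label_list)}
print(label_map)  # {'contradiction': 0, 'neutral': 1, 'entailment': 2}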
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 147 |
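# Hedged sketch of the lazy-import idea behind _LazyModule. This is a toy
# stand-in, not the transformers implementation: submodules are imported only
# when one of their exported names is first accessed.
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the module that actually defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value
# Usage sketch: LazyModule("pkg", {"json": ["loads"]}).loads('{"a": 1}') == {"a": 1}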
from __future__ import annotations
def lowercase ( __A : int ) -> list[int]:
'''simple docstring'''
snake_case : Dict = 2
snake_case : int = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__A )
if n > 1:
factors.append(__A )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
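# Readable restatement of the trial-division factorization above, with a check.
def prime_factors(n: int) -> list[int]:
    factors = []
    i = 2
    while i * i <= n:
        if n % i:       # i does not divide n: try the next candidate
            i += 1
        else:           # i divides n: record it and divide it out
            n //= i
            factors.append(i)
    if n > 1:           # whatever remains is itself prime
        factors.append(n)
    return factors
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]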
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowerCAmelCase = None
lowerCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowerCAmelCase = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class _a :
_lowercase : bool = True
_lowercase : Optional[str] = None
# Automatically constructed
_lowercase : ClassVar[str] = "PIL.Image.Image"
_lowercase : ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
_lowercase : str = field(default='''Image''' , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
return self.pa_type
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: Dict ) -> str:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = np.array(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return {"path": value, "bytes": None}
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return {"path": None, "bytes": value}
elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(SCREAMING_SNAKE_CASE_ )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
lowercase__ = {}
lowercase__ = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(SCREAMING_SNAKE_CASE_ ):
lowercase__ = PIL.Image.open(SCREAMING_SNAKE_CASE_ )
else:
lowercase__ = path.split('''::''' )[-1]
try:
lowercase__ = string_to_dict(SCREAMING_SNAKE_CASE_ , config.HUB_DATASETS_URL )["""repo_id"""]
lowercase__ = token_per_repo_id.get(SCREAMING_SNAKE_CASE_ )
except ValueError:
lowercase__ = None
with xopen(SCREAMING_SNAKE_CASE_ , '''rb''' , use_auth_token=SCREAMING_SNAKE_CASE_ ) as f:
lowercase__ = BytesIO(f.read() )
lowercase__ = PIL.Image.open(bytes_ )
else:
lowercase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int ) -> Any:
"""simple docstring"""
if pa.types.is_string(storage.type ):
lowercase__ = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.binary() )
lowercase__ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowercase__ = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.string() )
lowercase__ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
lowercase__ = storage.field('''bytes''' )
else:
lowercase__ = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
lowercase__ = storage.field('''path''' )
else:
lowercase__ = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.string() )
lowercase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowercase__ = pa.array(
[encode_np_array(np.array(SCREAMING_SNAKE_CASE_ ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowercase__ = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.string() )
lowercase__ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(SCREAMING_SNAKE_CASE_ , self.pa_type )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Dict ) -> Any:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(UpperCamelCase_: Any ):
with xopen(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f:
lowercase__ = f.read()
return bytes_
lowercase__ = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowercase__ = pa.array(
[os.path.basename(SCREAMING_SNAKE_CASE_ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
lowercase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(SCREAMING_SNAKE_CASE_ , self.pa_type )
def _a ( ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowercase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = BytesIO()
if image.format in list_image_compression_formats():
lowercase__ = image.format
else:
lowercase__ = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(__A , format=__A )
return buffer.getvalue()
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if hasattr(__A , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__A )}
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
lowercase__ = array.dtype
lowercase__ = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
lowercase__ = dtype.kind
lowercase__ = dtype.itemsize
lowercase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowercase__ = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowercase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowercase__ = dtype_byteorder + dtype_kind + str(__A )
lowercase__ = np.dtype(__A )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
lowercase__ = PIL.Image.fromarray(array.astype(__A ) )
return {"path": None, "bytes": image_to_bytes(__A )}
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
lowercase__ = first_non_null_value(__A )
if isinstance(__A , __A ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__A , np.ndarray ):
lowercase__ = no_op_if_value_is_null(__A )
return [obj_to_image_dict_func(__A ) for obj in objs]
elif isinstance(__A , PIL.Image.Image ):
lowercase__ = no_op_if_value_is_null(__A )
return [obj_to_image_dict_func(__A ) for obj in objs]
else:
return objs
else:
return objs
| 43 |
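# Hedged sketch of the array-to-image-bytes path used by the Image feature
# above. Assumptions: Pillow is installed and the array is already uint8 (the
# real code also downcasts other dtypes and may pick TIFF instead of PNG).
from io import BytesIO
import numpy as np
import PIL.Image
def np_to_png_bytes(array: np.ndarray) -> bytes:
    image = PIL.Image.fromarray(array)       # uint8 HxWx3 maps to mode "RGB"
    buffer = BytesIO()
    image.save(buffer, format="PNG")
    return buffer.getvalue()
png = np_to_png_bytes(np.zeros((8, 8, 3), dtype=np.uint8))
assert png[:4] == b"\x89PNG"                 # PNG magic bytes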
import numpy as np
def lowercase ( __A : np.array ) -> np.array:
'''simple docstring'''
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
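# The function above is an explicit form of tanh: 2 / (1 + e^(-2x)) - 1 == tanh(x).
import numpy as np
x = np.linspace(-3, 3, 7)
assert np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x))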
'''simple docstring'''
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
if len(__A ) < 2:
return collection
def circle_sort_util(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> bool:
_SCREAMING_SNAKE_CASE : int = False
if low == high:
return swapped
_SCREAMING_SNAKE_CASE : Union[str, Any] = low
_SCREAMING_SNAKE_CASE : List[str] = high
while left < right:
if collection[left] > collection[right]:
_SCREAMING_SNAKE_CASE : Tuple = (
collection[right],
collection[left],
)
_SCREAMING_SNAKE_CASE : Optional[int] = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
_SCREAMING_SNAKE_CASE : int = (
collection[right + 1],
collection[left],
)
_SCREAMING_SNAKE_CASE : Optional[int] = True
_SCREAMING_SNAKE_CASE : str = low + int((high - low) / 2 )
_SCREAMING_SNAKE_CASE : Optional[Any] = circle_sort_util(__A , __A , __A )
_SCREAMING_SNAKE_CASE : Any = circle_sort_util(__A , mid + 1 , __A )
return swapped or left_swap or right_swap
_SCREAMING_SNAKE_CASE : int = True
while is_not_sorted is True:
_SCREAMING_SNAKE_CASE : Optional[int] = circle_sort_util(__A , 0 , len(__A ) - 1 )
return collection
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Any = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
| 533 |
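# Readable sketch of a single "circle" pass from the sort above: elements that
# mirror each other around the midpoint of [low, high] are compared and swapped.
def circle_pass(a, low, high):
    swapped = False
    left, right = low, high
    while left < right:
        if a[left] > a[right]:
            a[left], a[right] = a[right], a[left]
            swapped = True
        left += 1
        right -= 1
    if left == right and a[left] > a[right + 1]:  # odd-length middle element
        a[left], a[right + 1] = a[right + 1], a[left]
        swapped = True
    return swapped
a = [3, 1, 2, 0]
circle_pass(a, 0, len(a) - 1)
print(a)  # [0, 1, 2, 3] -- one pass happens to fully sort this input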
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__lowercase : Optional[int] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def lowercase ( __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
snake_case : Dict = k.replace(__A , __A )
return k
def lowercase ( __A : dict , __A : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
snake_case : Dict = DEFAULTS.copy()
cfg_kwargs.update(__A )
snake_case : int = PegasusConfig(**__A )
snake_case : List[Any] = PegasusForConditionalGeneration(__A )
snake_case : Optional[Any] = torch_model.model.state_dict()
snake_case : Optional[int] = {}
for k, v in tf_weights.items():
snake_case : str = rename_state_dict_key(__A )
if new_k not in sd:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
snake_case : Optional[Any] = v.T
snake_case : List[Any] = torch.tensor(__A , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
snake_case : List[str] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
snake_case : Optional[Any] = mapping["""shared.weight"""]
snake_case : Optional[Any] = mapping["""shared.weight"""]
snake_case : Tuple = {k: torch.zeros_like(__A ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**__A )
snake_case , snake_case : Union[str, Any] = torch_model.model.load_state_dict(__A , strict=__A )
snake_case : Union[str, Any] = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def lowercase ( __A : int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = tf.train.list_variables(__A )
snake_case : Union[str, Any] = {}
snake_case : List[str] = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__A , desc="""converting tf checkpoint to dict""" ):
snake_case : str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case : List[str] = tf.train.load_variable(__A , __A )
snake_case : Optional[Any] = array
return tf_weights
def lowercase ( __A : str , __A : str ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = Path(__A ).parent.name
snake_case : Dict = task_specific_params[f"""summarization_{dataset}"""]["""max_position_embeddings"""]
snake_case : Any = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__A )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__A )
# convert model
snake_case : Dict = get_tf_weights_as_numpy(__A )
snake_case : List[Any] = task_specific_params[f"""summarization_{dataset}"""]
if dataset == "large":
snake_case : Optional[int] = task_specific_params
snake_case : Optional[int] = convert_pegasus(__A , __A )
torch_model.save_pretrained(__A )
snake_case : int = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(__A , Path(__A ) / """pytorch_model.bin""" )
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowercase : List[Any] = parser.parse_args()
if args.save_dir is None:
__lowercase : Optional[Any] = Path(args.tf_ckpt_path).parent.name
__lowercase : Union[str, Any] = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 36 | 0 |
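# Hedged sketch of the ordered key-renaming pass above. Ordering matters:
# "memory_attention" must be rewritten before the generic "attention" rule,
# exactly as in the PATTERNS list at the top of the script.
PATTERNS = [
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    ["kernel", "weight"],
]
def rename_key(k: str) -> str:
    for old, new in PATTERNS:
        k = k.replace(old, new)
    return k
assert rename_key("decoder/memory_attention/kernel") == "decoder.encoder_attn.weight"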
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = '''xlnet'''
SCREAMING_SNAKE_CASE = ['''mems''']
SCREAMING_SNAKE_CASE = {
'''n_token''': '''vocab_size''', # Backward compatibility
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , __snake_case=3_2000 , __snake_case=1024 , __snake_case=24 , __snake_case=16 , __snake_case=4096 , __snake_case="gelu" , __snake_case=True , __snake_case="bi" , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=0.1 , __snake_case=512 , __snake_case=None , __snake_case=True , __snake_case=False , __snake_case=False , __snake_case=-1 , __snake_case=False , __snake_case="last" , __snake_case=True , __snake_case="tanh" , __snake_case=0.1 , __snake_case=5 , __snake_case=5 , __snake_case=5 , __snake_case=1 , __snake_case=2 , **__snake_case , ) -> Optional[Any]:
'''simple docstring'''
__a =vocab_size
__a =d_model
__a =n_layer
__a =n_head
if d_model % n_head != 0:
raise ValueError(f'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
__a =d_model // n_head
__a =ff_activation
__a =d_inner
__a =untie_r
__a =attn_type
__a =initializer_range
__a =layer_norm_eps
__a =dropout
__a =mem_len
__a =reuse_len
__a =bi_data
__a =clamp_len
__a =same_length
__a =summary_type
__a =summary_use_proj
__a =summary_activation
__a =summary_last_dropout
__a =start_n_top
__a =end_n_top
__a =bos_token_id
__a =pad_token_id
__a =eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
' instead.' , SCREAMING_SNAKE_CASE_ , )
__a =kwargs["""use_cache"""]
__a =use_mems_eval
__a =use_mems_train
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def __magic_name__ ( self , __snake_case ) -> Any:
'''simple docstring'''
raise NotImplementedError(
f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
| 242 |
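# Standalone restatement of the divisibility check in the constructor above:
# d_model must split evenly across attention heads.
def head_dim(d_model: int, n_head: int) -> int:
    if d_model % n_head != 0:
        raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
    return d_model // n_head
assert head_dim(1024, 16) == 64  # the xlnet-base-like defaults above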
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _A ( pl.LightningModule ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__()
snake_case : Dict = model
snake_case : Optional[int] = 2
snake_case : Optional[Any] = nn.Linear(self.model.config.hidden_size ,self.num_labels )
def snake_case_ ( self ):
'''simple docstring'''
pass
def lowercase ( __A : str , __A : str , __A : str ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = LongformerModel.from_pretrained(__A )
snake_case : Tuple = LightningModel(__A )
snake_case : Optional[int] = torch.load(__A , map_location=torch.device("""cpu""" ) )
lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
snake_case : Dict = LongformerForQuestionAnswering.from_pretrained(__A )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__A )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase : List[str] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 36 | 0 |
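# Minimal sketch of the weight-transfer pattern above: copy trained parameters
# into a freshly initialised module of the same shape via load_state_dict.
import torch
import torch.nn as nn
src = nn.Linear(4, 2)   # stands in for lightning_model.qa_outputs
dst = nn.Linear(4, 2)   # stands in for longformer_for_qa.qa_outputs
dst.load_state_dict(src.state_dict())
x = torch.randn(1, 4)
assert torch.equal(src(x), dst(x))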
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__):
@register_to_config
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False , ) -> Any:
'''simple docstring'''
super().__init__()
a__ : Union[str, Any] =nn.Embedding(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
a__ : List[str] =nn.Embedding(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
a__ : Optional[int] =False
a__ : List[str] =nn.Dropout(p=SCREAMING_SNAKE_CASE_ )
a__ : Optional[Any] =TaConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , d_model=SCREAMING_SNAKE_CASE_ , num_heads=SCREAMING_SNAKE_CASE_ , d_kv=SCREAMING_SNAKE_CASE_ , d_ff=SCREAMING_SNAKE_CASE_ , dropout_rate=SCREAMING_SNAKE_CASE_ , feed_forward_proj=SCREAMING_SNAKE_CASE_ , is_decoder=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , )
a__ : str =nn.ModuleList()
for lyr_num in range(SCREAMING_SNAKE_CASE_ ):
a__ : Any =TaBlock(SCREAMING_SNAKE_CASE_ )
self.encoders.append(SCREAMING_SNAKE_CASE_ )
a__ : int =TaLayerNorm(SCREAMING_SNAKE_CASE_ )
a__ : Dict =nn.Dropout(p=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
a__ : Dict =self.token_embedder(SCREAMING_SNAKE_CASE_ )
a__ : List[str] =encoder_input_tokens.shape[1]
a__ : str =torch.arange(SCREAMING_SNAKE_CASE_ , device=encoder_input_tokens.device )
x += self.position_encoding(SCREAMING_SNAKE_CASE_ )
a__ : Optional[int] =self.dropout_pre(SCREAMING_SNAKE_CASE_ )
# inverted the attention mask
a__ : List[str] =encoder_input_tokens.size()
a__ : List[str] =self.get_extended_attention_mask(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for lyr in self.encoders:
a__ : Tuple =lyr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
a__ : Optional[int] =self.layer_norm(SCREAMING_SNAKE_CASE_ )
return self.dropout_post(SCREAMING_SNAKE_CASE_ ), encoder_inputs_mask
| 563 |
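# Hedged sketch of the extended-attention-mask inversion hinted at in the
# encoder above (assumption: the usual transformers convention of adding a
# large negative value at masked positions).
import torch
def extend_attention_mask(mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    extended = mask[:, None, None, :].to(dtype)        # (batch, 1, 1, seq)
    return (1.0 - extended) * torch.finfo(dtype).min   # 0 kept, padding -> ~-inf
out = extend_attention_mask(torch.tensor([[1, 1, 0]]))[0, 0, 0]
assert out[0] == 0 and out[2] < -1e30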
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
__lowercase : Optional[Any] = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
__lowercase : Optional[int] = None
def lowercase ( ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__A , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__A , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def lowercase ( __A : Union[str, Any] ) -> int:
'''simple docstring'''
snake_case : Any = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case : int = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def lowercase ( __A : int ) -> Optional[int]:
'''simple docstring'''
def remove_articles(__A : List[Any] ):
return ARTICLES_REGEX.sub(""" """ , __A )
def white_space_fix(__A : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(__A : Tuple ):
snake_case : Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__A : Any ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) )
def lowercase ( __A : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if not s:
return []
return normalize_answer(__A ).split()
def lowercase ( __A : Optional[int] , __A : int ) -> List[Any]:
'''simple docstring'''
return int(normalize_answer(__A ) == normalize_answer(__A ) )
def lowercase ( __A : Any , __A : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = get_tokens(__A )
snake_case : str = get_tokens(__A )
snake_case : Dict = collections.Counter(__A ) & collections.Counter(__A )
snake_case : Optional[int] = sum(common.values() )
if len(__A ) == 0 or len(__A ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
snake_case : List[Any] = 1.0 * num_same / len(__A )
snake_case : int = 1.0 * num_same / len(__A )
snake_case : Dict = (2 * precision * recall) / (precision + recall)
return fa
def lowercase ( __A : List[Any] , __A : int ) -> str:
'''simple docstring'''
snake_case : Tuple = {}
snake_case : Tuple = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case : str = qa["""id"""]
snake_case : Union[str, Any] = [t for t in qa["""answers"""]["""text"""] if normalize_answer(__A )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
snake_case : Optional[Any] = [""""""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
snake_case : Dict = preds[qid]
# Take max over all gold answers
snake_case : Union[str, Any] = max(compute_exact(__A , __A ) for a in gold_answers )
snake_case : Optional[int] = max(compute_fa(__A , __A ) for a in gold_answers )
return exact_scores, fa_scores
def lowercase ( __A : str , __A : Any , __A : List[Any] , __A : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = {}
for qid, s in scores.items():
snake_case : Any = na_probs[qid] > na_prob_thresh
if pred_na:
snake_case : str = float(not qid_to_has_ans[qid] )
else:
snake_case : List[Any] = s
return new_scores
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : List[str]=None ) -> int:
'''simple docstring'''
if not qid_list:
snake_case : List[str] = len(__A )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
snake_case : Any = len(__A )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def lowercase ( __A : Optional[Any] , __A : Tuple , __A : List[str] ) -> Optional[Any]:
'''simple docstring'''
for k in new_eval:
snake_case : str = new_eval[k]
def lowercase ( __A : Tuple , __A : int , __A : Dict , __A : Dict ) -> int:
'''simple docstring'''
plt.step(__A , __A , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(__A , __A , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__A )
plt.savefig(__A )
plt.clf()
def lowercase ( __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Tuple , __A : Optional[Any]=None , __A : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[int] = sorted(__A , key=lambda __A : na_probs[k] )
snake_case : Any = 0.0
snake_case : str = 1.0
snake_case : Tuple = 0.0
snake_case : str = [1.0]
snake_case : Any = [0.0]
snake_case : Dict = 0.0
for i, qid in enumerate(__A ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
snake_case : str = true_pos / float(i + 1 )
snake_case : List[str] = true_pos / float(__A )
if i == len(__A ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__A )
recalls.append(__A )
if out_image:
plot_pr_curve(__A , __A , __A , __A )
return {"ap": 100.0 * avg_prec}
def lowercase ( __A : Any , __A : Optional[int] , __A : Tuple , __A : Tuple , __A : List[Any] , __A : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if out_image_dir and not os.path.exists(__A ):
os.makedirs(__A )
snake_case : Tuple = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
snake_case : str = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
snake_case : int = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
snake_case : Dict = {k: float(__A ) for k, v in qid_to_has_ans.items()}
snake_case : int = make_precision_recall_eval(
__A , __A , __A , __A , out_image=os.path.join(__A , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(__A , __A , """pr_exact""" )
merge_eval(__A , __A , """pr_f1""" )
merge_eval(__A , __A , """pr_oracle""" )
def lowercase ( __A : List[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if not qid_list:
return
snake_case : int = [na_probs[k] for k in qid_list]
snake_case : List[str] = np.ones_like(__A ) / float(len(__A ) )
plt.hist(__A , weights=__A , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(__A , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def lowercase ( __A : List[Any] , __A : Tuple , __A : Tuple , __A : Any ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
snake_case : str = num_no_ans
snake_case : Optional[Any] = cur_score
snake_case : Optional[Any] = 0.0
snake_case : List[Any] = sorted(__A , key=lambda __A : na_probs[k] )
for i, qid in enumerate(__A ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
snake_case : Dict = scores[qid]
else:
if preds[qid]:
snake_case : Dict = -1
else:
snake_case : str = 0
cur_score += diff
if cur_score > best_score:
snake_case : Union[str, Any] = cur_score
snake_case : List[Any] = na_probs[qid]
return 100.0 * best_score / len(__A ), best_thresh
def lowercase ( __A : Dict , __A : str , __A : str , __A : int , __A : str , __A : Any ) -> List[str]:
'''simple docstring'''
snake_case , snake_case : Optional[int] = find_best_thresh(__A , __A , __A , __A )
snake_case , snake_case : str = find_best_thresh(__A , __A , __A , __A )
snake_case : List[str] = best_exact
snake_case : List[Any] = exact_thresh
snake_case : Optional[Any] = best_fa
snake_case : Optional[int] = fa_thresh
def lowercase ( ) -> Any:
'''simple docstring'''
with open(OPTS.data_file ) as f:
snake_case : Dict = json.load(__A )
snake_case : Union[str, Any] = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
snake_case : int = json.load(__A )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
snake_case : Any = json.load(__A )
else:
snake_case : Any = {k: 0.0 for k in preds}
snake_case : Optional[int] = make_qid_to_has_ans(__A ) # maps qid to True/False
snake_case : Dict = [k for k, v in qid_to_has_ans.items() if v]
snake_case : Optional[int] = [k for k, v in qid_to_has_ans.items() if not v]
snake_case , snake_case : Optional[Any] = get_raw_scores(__A , __A )
snake_case : Tuple = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
snake_case : Optional[Any] = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
snake_case : Optional[int] = make_eval_dict(__A , __A )
if has_ans_qids:
snake_case : Any = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """HasAns""" )
if no_ans_qids:
snake_case : str = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(__A , __A , __A , __A , __A , __A )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__A , __A , __A , __A , __A , OPTS.out_image_dir )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(__A , __A )
else:
print(json.dumps(__A , indent=2 ) )
if __name__ == "__main__":
__lowercase : Union[str, Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 36 | 0 |
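# Standalone restatement of the token-overlap F1 used above (the answer
# normalization from normalize_answer is omitted for brevity).
import collections
def f1_score(gold: str, pred: str) -> float:
    gold_toks, pred_toks = gold.split(), pred.split()
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if not gold_toks or not pred_toks:
        return float(gold_toks == pred_toks)  # both empty counts as a match
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)
assert abs(f1_score("the cat sat", "cat sat") - 0.8) < 1e-9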
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=True ):
"""simple docstring"""
model.train()
_lowerCamelCase : List[Any] = model(__A )
_lowerCamelCase : int = F.mse_loss(__A , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__A )
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int=False ):
"""simple docstring"""
set_seed(42 )
_lowerCamelCase : Optional[Any] = RegressionModel()
_lowerCamelCase : Dict = deepcopy(__A )
_lowerCamelCase : Optional[Any] = RegressionDataset(length=80 )
_lowerCamelCase : Optional[Any] = DataLoader(__A , batch_size=16 )
model.to(accelerator.device )
if sched:
_lowerCamelCase : List[str] = AdamW(params=model.parameters() , lr=1E-3 )
_lowerCamelCase : List[str] = AdamW(params=ddp_model.parameters() , lr=1E-3 )
_lowerCamelCase : int = LambdaLR(__A , lr_lambda=lambda _lowerCAmelCase : epoch**0.6_5 )
_lowerCamelCase : Any = LambdaLR(__A , lr_lambda=lambda _lowerCAmelCase : epoch**0.6_5 )
# Make a copy of `model`
if sched:
_lowerCamelCase : Optional[int] = accelerator.prepare(__A , __A , __A , __A )
else:
_lowerCamelCase : Any = accelerator.prepare(__A , __A )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = get_training_setup(__A )
# Use a single batch
_lowerCamelCase : str = next(iter(__A ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_lowerCamelCase : List[Any] = accelerator.gather((ddp_input, ddp_target) )
_lowerCamelCase : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__A , __A , __A , __A )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__A ):
step_model(__A , __A , __A , __A )
else:
# Sync grads
step_model(__A , __A , __A , __A )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__A , __A , __A , __A )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_lowerCamelCase : Tuple = ddp_input[torch.randperm(len(__A ) )]
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = get_training_setup(__A )
# Use a single batch
_lowerCamelCase : Any = next(iter(__A ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_lowerCamelCase : List[str] = accelerator.gather((ddp_input, ddp_target) )
_lowerCamelCase : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__A , __A , __A , __A )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__A ):
step_model(__A , __A , __A , __A )
else:
# Sync grads
step_model(__A , __A , __A , __A )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_lowerCamelCase : Optional[int] = ddp_input[torch.randperm(len(__A ) )]
def A_ ( _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Any=False ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = Accelerator(
split_batches=__A , dispatch_batches=__A , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_lowerCamelCase : Optional[int] = get_training_setup(__A )
for iteration, batch in enumerate(__A ):
_lowerCamelCase : Optional[Any] = batch.values()
# Gather the distributed inputs and targs for the base model
_lowerCamelCase : List[str] = accelerator.gather((ddp_input, ddp_target) )
_lowerCamelCase : Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__A , __A , __A , __A , __A )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__A ):
step_model(__A , __A , __A , __A )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__A ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_lowerCamelCase : Tuple = ddp_input[torch.randperm(len(__A ) )]
GradientState._reset_state()
def A_ ( _lowerCAmelCase : Any=False , _lowerCAmelCase : str=False ):
"""simple docstring"""
_lowerCamelCase : Any = Accelerator(
split_batches=__A , dispatch_batches=__A , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_lowerCamelCase : int = get_training_setup(__A , __A )
for iteration, batch in enumerate(__A ):
_lowerCamelCase : Optional[Any] = batch.values()
# Gather the distributed inputs and targs for the base model
_lowerCamelCase : List[Any] = accelerator.gather((ddp_input, ddp_target) )
_lowerCamelCase : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__A , __A , __A , __A , __A )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__A )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__A ):
step_model(__A , __A , __A , __A )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
_lowerCamelCase : List[str] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__A ))
if accelerator.num_processes > 1:
check_model_parameters(__A , __A , __A , __A )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = Accelerator()
_lowerCamelCase : Dict = RegressionDataset(length=80 )
_lowerCamelCase : Tuple = DataLoader(__A , batch_size=16 )
_lowerCamelCase : Tuple = RegressionDataset(length=96 )
_lowerCamelCase : Optional[Any] = DataLoader(__A , batch_size=16 )
_lowerCamelCase : Any = accelerator.prepare(__A , __A )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__A ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__A )
if iteration < len(__A ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__A ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__A )
if batch_num < len(__A ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = Accelerator()
_lowerCamelCase : Dict = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(__A )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(__A )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(__A , __A )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(__A , __A )
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 44 |
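# Hedged usage sketch of the accumulate() pattern exercised above. Assumptions:
# single-process CPU run with accelerate installed; with
# gradient_accumulation_steps=2 the optimizer only really steps (and gradients
# only sync) every second batch.
import torch
import torch.nn.functional as F
from accelerate import Accelerator
accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(2, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
model, opt = accelerator.prepare(model, opt)
for step in range(4):
    x, y = torch.randn(8, 2), torch.randn(8, 1)
    with accelerator.accumulate(model):
        loss = F.mse_loss(model(x), y)
        accelerator.backward(loss)
        opt.step()
        opt.zero_grad()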
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__lowercase : Dict = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    '''simple docstring'''
    model_input_names = ["pixel_values"]

    def __init__( self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_factor=1 / 255, do_center_crop=True, crop_size=None, do_flip_channel_order=True, **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize( self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs ):
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop( self, image, size, data_format=None, **kwargs ):
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale( self, image, scale, data_format=None, **kwargs ):
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order( self, image, data_format=None ):
        '''simple docstring'''
        return flip_channel_order(image, data_format=data_format)

    def preprocess( self, images, do_resize=None, size=None, resample=None, do_rescale=None, rescale_factor=None, do_center_crop=None, crop_size=None, do_flip_channel_order=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation( self, outputs, target_sizes=None ):
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 36 | 0 |
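# Hedged usage sketch for the processor above; the exported name
# `MobileViTImageProcessor` is an assumption based on the BGR channel flip
# and the shortest-edge/center-crop defaults.
from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor()
image = Image.new("RGB", (640, 480))
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # expected: torch.Size([1, 3, 256, 256])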
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # quiet TensorFlow logging before any TF import (the reason `os` is imported)
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 620 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version( config_name: str, save_dir: str, **config_kwargs ):
    '''simple docstring'''
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
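    # Hedged invocation sketch; the script file name is illustrative:
    #   python save_randomly_initialized_model.py t5-small ./t5-random --d_model=64
    # which is equivalent to calling
    #   save_randomly_initialized_version("t5-small", "./t5-random", d_model=64)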
| 36 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast ( BertTokenizerFast ):
    """simple docstring"""
    slow_tokenizer_class = CustomTokenizer
pass
| 590 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : str = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    '''simple docstring'''
    model_type = '''mobilenet_v1'''

    def __init__( self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs( self ):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4
| 36 | 0 |
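# Hedged usage sketch: instantiating the config above through its real
# transformers counterpart; the model class pairing is assumed.
from transformers import MobileNetV1Config, MobileNetV1Model

config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
model = MobileNetV1Model(config)  # randomly initialised weights
print(model.config.depth_multiplier)  # 0.75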
def hubble_parameter( hubble_constant: float, radiation_density: float, matter_density: float, dark_energy: float, redshift: float, ) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
) | 397 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : List[str] = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''decision_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ):
        '''simple docstring'''
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 36 | 0 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__( self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1E-5, correct_steps: int = 1, ):
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input( self, sample, timestep=None ):
        return sample

    def set_timesteps( self, num_inference_steps, sampling_eps=None, device=None ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas( self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma( self, timesteps, t ):
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )

    def step_pred( self, model_output, timestep, sample, generator=None, return_dict=True, ):
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler''' )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct( self, model_output, sample, generator=None, return_dict=True, ):
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise( self, original_samples, noise, timesteps, ):
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__( self ):
        return self.config.num_train_timesteps
| 541 |
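# Hedged sampling sketch for the scheduler above (diffusers' ScoreSdeVeScheduler);
# the zero tensor stands in for a trained score network, so the "sample" it
# produces is meaningless but the call sequence is the point.
import torch
from diffusers import ScoreSdeVeScheduler

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=10)
scheduler.set_sigmas(num_inference_steps=10)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for grad_x log p_t(x)
    sample = scheduler.step_correct(model_output, sample).prev_sample
    sample = scheduler.step_pred(model_output, t, sample).prev_sample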
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 36 | 0 |
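# The guarded-import pattern above in miniature; the probe helper is written
# inline here instead of being imported from the library's utils, so the
# module layout is illustrative.
from importlib.util import find_spec


class OptionalDependencyNotAvailable(ImportError):
    """Raised when a soft dependency is missing."""


try:
    if find_spec("torchsde") is None:
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    DPMSolverSDEScheduler = None  # stand-in for the generated dummy object
else:
    from diffusers.schedulers import DPMSolverSDEScheduler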
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : str = logging.get_logger(__name__)
lowerCAmelCase_ : Union[str, Any] = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = '''gpt_neox_japanese'''

    def __init__( self, vocab_size=3_2000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=1_0000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=3_1996, eos_token_id=3_1999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs, ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 442 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : List[str] ) -> Any:
'''simple docstring'''
snake_case : Tuple = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
snake_case : Optional[Any] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case : Optional[int] = f"""{src_lang}-{tgt_lang}"""
snake_case : Any = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(__A , exist_ok=__A )
snake_case : Union[str, Any] = os.path.join(__A , """README.md""" )
print(f"""Generating {path}""" )
with open(__A , """w""" , encoding="""utf-8""" ) as f:
f.write(__A )
# make sure we are under the root of the project
__lowercase : int = Path(__file__).resolve().parent.parent.parent
__lowercase : List[str] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowercase , __lowercase , __lowercase : List[str] = model_name.split('''-''')
__lowercase : str = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 36 | 0 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
__lowerCAmelCase = parser.parse_args()
if args.model_type == "bert":
__lowerCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
__lowerCAmelCase = '''bert'''
else:
raise ValueError("""args.model_type should be \"bert\".""")
__lowerCAmelCase = model.state_dict()
__lowerCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
__lowerCAmelCase = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
__lowerCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
__lowerCAmelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
__lowerCAmelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
__lowerCAmelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
__lowerCAmelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
__lowerCAmelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
__lowerCAmelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
__lowerCAmelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
__lowerCAmelCase = state_dict['''cls.predictions.decoder.weight''']
__lowerCAmelCase = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[F'''cls.predictions.transform.dense.{w}''']
__lowerCAmelCase = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
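    # Hedged follow-up sketch: the dump is intended to seed a DistilBERT
    # student; `strict=False` tolerates parameters the dump does not cover.
    from transformers import DistilBertForMaskedLM

    student = DistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
    student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)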
| 147 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 36 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph, v, visited_forward, visited_backward, cst_fwd, cst_bwd, queue, parent, shortest_distance, ):
    """simple docstring"""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij( source, destination, graph_forward, graph_backward ):
    """simple docstring"""
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
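    # Quick check on the graphs above: the cheapest E -> F route is
    # E -> G -> F with total weight 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3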
| 43 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class SageMakerTrainer ( Trainer ):
    '''simple docstring'''
    def __init__( self, args=None, **kwargs ):
        '''simple docstring'''
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 36 | 0 |
'''simple docstring'''
def solution( n: int = 400_0000 ):
    """simple docstring"""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F"{solution() = }")
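    # The even-valued Fibonacci numbers also satisfy E(k) = 4*E(k-1) + E(k-2)
    # (2, 8, 34, 144, ...), which gives an equivalent list-free variant; this
    # alternative is a sketch added for illustration.
    def solution_recurrence(n: int = 400_0000) -> int:
        a, b, total = 2, 8, 0
        while a <= n:
            total += a
            a, b = b, 4 * b + a
        return total

    assert solution_recurrence(100) == 2 + 8 + 34
    assert solution_recurrence() == solution()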
| 533 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowercase : List[str] = ['''text''', '''image''', '''audio''']
def create_inputs( input_types: List[str] ) -> list:
    '''simple docstring'''
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512)) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""")
    return inputs
def output_types( outputs: List ) -> list:
    '''simple docstring'''
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"""Invalid output: {output}""")
    return output_types
@is_tool_test
class _A :
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
snake_case : Dict = self.tool.inputs
for _input in inputs:
if isinstance(_input ,SCREAMING_SNAKE_CASE_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : str = self.tool(*SCREAMING_SNAKE_CASE_ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : Union[str, Any] = [outputs]
self.assertListEqual(output_types(SCREAMING_SNAKE_CASE_ ) ,self.tool.outputs )
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
for output, output_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.outputs ):
snake_case : List[str] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = create_inputs(self.tool.inputs )
snake_case : Any = []
for _input, input_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.inputs ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Tuple = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Union[str, Any] = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
| 36 | 0 |
from manim import *
class Stage1 ( Scene ):
    def construct( self ):
'''simple docstring'''
__a =Rectangle(height=0.5 , width=0.5 )
__a =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a =[mem.copy() for i in range(6 )]
__a =[mem.copy() for i in range(6 )]
__a =VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__a =VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__a =VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__a =Text('CPU' , font_size=24 )
__a =Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
__a =[mem.copy() for i in range(1 )]
__a =VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__a =Text('GPU' , font_size=24 )
__a =Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
gpu.align_to(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(SCREAMING_SNAKE_CASE_ )
__a =[mem.copy() for i in range(6 )]
__a =VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__a =Text('Model' , font_size=24 )
__a =Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(SCREAMING_SNAKE_CASE_ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE_ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE_ , run_time=1 ) , )
__a =MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
__a =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a =MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=2.5 ) , Write(SCREAMING_SNAKE_CASE_ ) , Write(SCREAMING_SNAKE_CASE_ ) )
self.add(SCREAMING_SNAKE_CASE_ )
__a =[]
__a =[]
__a =[]
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
__a =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7 )
cpu_target.move_to(SCREAMING_SNAKE_CASE_ )
cpu_target.generate_target()
__a =0.46 / 4
__a =0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=SCREAMING_SNAKE_CASE_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0 )
cpu_targs.append(SCREAMING_SNAKE_CASE_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(SCREAMING_SNAKE_CASE_ ) )
second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=1.5 ) )
self.play(*SCREAMING_SNAKE_CASE_ )
self.play(*SCREAMING_SNAKE_CASE_ )
self.wait()
| 242 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowercase : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset( path, tmp_path ):
    '''simple docstring'''
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric( path, tmp_path ):
    '''simple docstring'''
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info( path, config_name, expected_splits ):
    '''simple docstring'''
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error( path, config_name, expected_exception ):
    '''simple docstring'''
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names( path, expected ):
    '''simple docstring'''
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_infos( path, expected_configs, expected_splits_in_first_config ):
    '''simple docstring'''
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info( path, expected_config, expected_splits ):
    '''simple docstring'''
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error( path, config_name, expected_exception ):
    '''simple docstring'''
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 36 | 0 |
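# The inspected helpers in one short session; this assumes network access to
# the Hugging Face Hub and mirrors the dataset names used in the tests above.
from datasets import get_dataset_config_names, get_dataset_split_names

print(get_dataset_config_names("squad"))               # ['plain_text']
print(get_dataset_split_names("squad", "plain_text"))  # ['train', 'validation']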
cache: dict[tuple[int, int, int], int] = {}


def _calculate( days: int, absent: int, late: int ):
    """simple docstring"""
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution( days: int = 30 ):
    """simple docstring"""
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
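    # Sanity check: Project Euler 191 quotes 43 valid four-day prize strings.
    assert solution(4) == 43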
| 563 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowercase : Optional[Any] = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''albert'''

    def __init__( self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 36 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : Optional[Any],__A : Dict=1_3,__A : int=7,__A : int=False,__A : Union[str, Any]=True,__A : str=False,__A : List[Any]=False,__A : Dict=1_9,__A : Dict=3_2,__A : str=5,__A : Any=4,__A : Union[str, Any]=3_7,__A : Dict="gelu",__A : Dict=0.1,__A : List[str]=0.1,__A : Optional[int]=5_1_2,__A : Any=1_6,__A : Any=2,__A : Any=0.02,__A : Dict=3,__A : int=4,__A : List[str]=None,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : List[Any] = batch_size
_lowerCamelCase : Union[str, Any] = seq_length
_lowerCamelCase : Dict = is_training
_lowerCamelCase : List[Any] = use_input_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Dict = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : List[str] = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : Any = type_sequence_label_size
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : Tuple = num_labels
_lowerCamelCase : Tuple = num_choices
_lowerCamelCase : Any = scope
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : str = None
_lowerCamelCase : Tuple = None
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size],self.num_choices )
_lowerCamelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = EsmConfig(
vocab_size=3_3,hidden_size=self.hidden_size,pad_token_id=1,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,is_folding_model=SCREAMING_SNAKE_CASE_,esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},)
return config
def lowerCamelCase_ ( self : Optional[Any],__A : Tuple,__A : Union[str, Any],__A : Tuple,__A : List[str],__A : Dict,__A : Dict ):
_lowerCamelCase : Any = EsmForProteinFolding(config=SCREAMING_SNAKE_CASE_ ).float()
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
_lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_,attention_mask=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : int = model(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.positions.shape,(8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape,(8, self.batch_size, self.seq_length, 7, 2) )
def lowerCamelCase_ ( self : Dict ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = False
lowerCAmelCase_ = (EsmForProteinFolding,) if is_torch_available() else ()
lowerCAmelCase_ = ()
lowerCAmelCase_ = {} if is_torch_available() else {}
lowerCAmelCase_ = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("Does not support attention outputs" )
def lowerCamelCase_ ( self : Any ):
pass
@unittest.skip
def lowerCamelCase_ ( self : Any ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : List[str] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : Dict ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : List[Any] ):
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def lowerCamelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip("ESMFold only has one output format." )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip("ESMFold does not support input chunking." )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def lowerCamelCase_ ( self : List[Any] ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowerCamelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase_ ( self : List[Any] ):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4)) | 44 |
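# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original test file): running
# the same checkpoint outside the test harness. It assumes "facebook/esmfold_v1"
# ships a compatible AutoTokenizer; the sequence and variable names are ours.
import torch
from transformers import AutoTokenizer, EsmForProteinFolding

tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float().eval()
inputs = tokenizer(["MKTVRQERLKSIVRILERSKEPVSGAQ"], return_tensors="pt", add_special_tokens=False)
with torch.no_grad():
    positions = model(**inputs).positions  # (num_recycles, batch, seq_len, atoms=14, xyz=3)
print(positions.shape)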
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of the numbers in ``nums``.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
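# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original test file): the
# helpers exercised above also work as a lightweight way to probe a Hub dataset
# without downloading it. "squad" is taken from the parametrize lists above.
from datasets import get_dataset_config_names, get_dataset_split_names

config_names = get_dataset_config_names("squad")
print(config_names)  # e.g. ['plain_text']
print(get_dataset_split_names("squad", config_name=config_names[0]))  # ['train', 'validation']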
| 620 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    """Configuration class for a vision encoder / text decoder composite model."""

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        """Instantiate the composite config from an encoder config and a decoder config."""
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature: str = "default"):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
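# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original module): composing
# an encoder/decoder pair with `from_encoder_decoder_configs`. ViTConfig and
# BertConfig are arbitrary choices of ours for the two sides.
from transformers import BertConfig, ViTConfig

encoder_config = ViTConfig()
decoder_config = BertConfig()
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention
print(config.to_dict()["model_type"])  # vision-encoder-decoder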
| 36 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
A_ : Any = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
A_ : Dict = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
self.assertGreater(len(SCREAMING_SNAKE_CASE_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
SCREAMING_SNAKE_CASE_ , {
'''score''': ANY(SCREAMING_SNAKE_CASE_ ),
'''label''': ANY(SCREAMING_SNAKE_CASE_ ),
'''box''': {'''xmin''': ANY(SCREAMING_SNAKE_CASE_ ), '''ymin''': ANY(SCREAMING_SNAKE_CASE_ ), '''xmax''': ANY(SCREAMING_SNAKE_CASE_ ), '''ymax''': ANY(SCREAMING_SNAKE_CASE_ )},
} , )
import datasets
A_ : Any = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
A_ : Any = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
A_ : Union[str, Any] = object_detector(SCREAMING_SNAKE_CASE_ , threshold=0.0 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for outputs in batch_outputs:
self.assertGreater(len(SCREAMING_SNAKE_CASE_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
SCREAMING_SNAKE_CASE_ , {
'''score''': ANY(SCREAMING_SNAKE_CASE_ ),
'''label''': ANY(SCREAMING_SNAKE_CASE_ ),
'''box''': {'''xmin''': ANY(SCREAMING_SNAKE_CASE_ ), '''ymin''': ANY(SCREAMING_SNAKE_CASE_ ), '''xmax''': ANY(SCREAMING_SNAKE_CASE_ ), '''ymax''': ANY(SCREAMING_SNAKE_CASE_ )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def _snake_case ( self )->str:
'''simple docstring'''
pass
@require_torch
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Optional[int] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
A_ : Optional[int] = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE_ )
A_ : Optional[Any] = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
A_ : str = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ )
A_ : List[str] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
] , )
        batched_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(batched_outputs, decimals=4),
            [
[
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
[
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
        batched_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(batched_outputs, decimals=4),
            [
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
        batched_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(batched_outputs, decimals=4),
            [
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993
        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
] , )
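# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original test file): the
# shortest path to the pipeline under test. The model id and image URL are the
# ones used by the slow tests above.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
for detection in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
    print(detection["label"], detection["score"], detection["box"])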
| 590 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
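# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original script): driving
# the conversion programmatically instead of via argparse. The dump path is an
# arbitrary choice of ours; note the call downloads the timm weights.
convert_bit_checkpoint(
    model_name="resnetv2_50x1_bitm",
    pytorch_dump_folder_path="./resnetv2_50x1_bitm",
    push_to_hub=False,
)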
| 36 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Rel position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
lowercase__ : Optional[Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase__ : Optional[int] = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase__ : str = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
lowercase__ : Any = state_dict["""shared.weight"""]
return state_dict
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowercase__ : int = checkpoints.load_tax_checkpoint(__A )
lowercase__ : Any = convert_tax_to_pytorch(
__A ,num_layers=config.num_layers ,is_encoder_only=__A ,scalable_attention=__A )
lowercase__ : Optional[int] = make_state_dict(__A ,__A )
model.load_state_dict(__A ,strict=__A )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = False ,) -> List[str]:
lowercase__ : List[str] = MTaConfig.from_json_file(__A )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase__ : Tuple = UMTaEncoderModel(__A )
else:
lowercase__ : str = UMTaForConditionalGeneration(__A )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__A ,__A ,__A ,__A ,__A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__A )
# Verify that we can load the checkpoint.
model.from_pretrained(__A )
print("Done" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    ) | 397 |
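# ---------------------------------------------------------------------------
# Illustrative invocation sketch (added; not part of the original script). The
# script filename and all paths below are placeholders of ours; the flags mirror
# the argparse definitions above.
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./umt5-converted \
#       --scalable_attention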
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self):
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
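# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original conftest): a test
# class consuming the fixture. The class body is ours; it relies only on the
# `framework` attribute convention and the `sm_env` fixture defined above.
@pytest.mark.usefixtures("sm_env")
class ExampleSageMakerTest:
    framework = "pytorch"

    def test_env_is_attached(self):
        assert self.env.base_job_name == "pytorch-transfromers-test"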
| 36 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__( self : str , _A : Union[str, Any] , _A : Optional[int] = 5_12 , _A : Union[str, Any] = 5_12 , _A : Tuple = 50 , _A : Tuple = 7.5 , _A : List[str] = None , _A : str = 1 , _A : str = 0.0 , _A : Dict = None , _A : Union[str, Any] = None , _A : str = "pil" , _A : Tuple = True , _A : int = None , _A : Union[str, Any] = 1 , _A : Optional[int] = None , **_A : Tuple , ) -> List[str]:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_ : List[Any] = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_ : Any = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(SCREAMING_SNAKE_CASE_ )}." )
# get prompt text embeddings
UpperCAmelCase_ : Any = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
UpperCAmelCase_ : int = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase_ : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase_ : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCAmelCase_ : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ : Optional[int] = text_embeddings.shape
UpperCAmelCase_ : Any = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
UpperCAmelCase_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase_ : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase_ : List[str]
if negative_prompt is None:
UpperCAmelCase_ : int = [""""""]
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !="
F" {type(SCREAMING_SNAKE_CASE_ )}." )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_ : Dict = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
UpperCAmelCase_ : Optional[Any] = negative_prompt
UpperCAmelCase_ : List[str] = text_input_ids.shape[-1]
UpperCAmelCase_ : List[str] = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
UpperCAmelCase_ : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ : Optional[Any] = uncond_embeddings.shape[1]
UpperCAmelCase_ : Union[str, Any] = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
UpperCAmelCase_ : Any = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase_ : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase_ : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCAmelCase_ : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase_ : Optional[int] = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='''cpu''' , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
UpperCAmelCase_ : Any = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='''cpu''' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
UpperCAmelCase_ : List[Any] = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Optional[int] = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
UpperCAmelCase_ : str = latents_reference.to(self.device )
UpperCAmelCase_ : Optional[int] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCAmelCase_ : Optional[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCAmelCase_ : List[Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCAmelCase_ : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCAmelCase_ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCAmelCase_ : List[str] = 0 if dx < 0 else dx
UpperCAmelCase_ : Any = 0 if dy < 0 else dy
UpperCAmelCase_ : Dict = max(-dx , 0 )
UpperCAmelCase_ : Optional[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
UpperCAmelCase_ : Optional[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase_ : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_ : List[str] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase_ : Optional[Any] = {}
if accepts_eta:
UpperCAmelCase_ : int = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : List[str] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCAmelCase_ : str = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : int = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Optional[Any] = 1 / 0.18_215 * latents
UpperCAmelCase_ : Any = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCAmelCase_ : str = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase_ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCAmelCase_ : Tuple = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors='''pt''' ).to(
self.device )
UpperCAmelCase_ : Optional[Any] = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCAmelCase_ : List[str] = None
if output_type == "pil":
UpperCAmelCase_ : Tuple = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
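# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original pipeline file):
# wiring the pipeline up from a pretrained Stable Diffusion checkpoint. The
# checkpoint id, prompt, and the `**base.components` shortcut are choices of
# ours and may need adjusting to your diffusers version.
from diffusers import StableDiffusionPipeline

base = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = SeedResizeStableDiffusionPipeline(**base.components)
image = pipe("a photo of an astronaut riding a horse", height=512, width=512).images[0]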
| 541 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 36 | 0 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 442 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for simple sequence classification."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data; property names match the model inputs."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        """
        This will be superseded by a framework-agnostic approach soon.
        """

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        """TensorFlow dataset wrapping the HANS train/evaluation features."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class.
        Note that we follow the standard three labels for MNLI, even though the HANS
        evaluation groups entailment and non-entailment."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer,
):
    """Loads a list of ``InputExample`` into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
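# Minimal usage sketch (illustrative: assumes a local HANS download in ./hans and a
# BERT tokenizer; neither the path nor the model name is part of this module):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = HansDataset("./hans", tokenizer, task="hans", max_seq_length=128, evaluate=True)
#   print(len(dataset), dataset.get_labels())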
| 36 | 0 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
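# These tests can be run with pytest, e.g. (file path illustrative):
#   python -m pytest tests/utils/test_generic_utils.py -q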
| 147 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in ascending order.

    >>> prime_factors(60)
    [2, 2, 3, 5]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
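    # Quick demonstration (illustrative input):
    print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]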
| 36 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        """Test that stable diffusion works with fp16."""
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        # without safety guidance the safety checker blacks out the image entirely
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
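# The fast tests above run on CPU with tiny dummy models; the @nightly class needs a GPU.
# Typical invocation (test path illustrative):
#   python -m pytest tests/pipelines/stable_diffusion_safe/ -k "FastTests" -q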
| 43 |
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
    """
    Implements the hyperbolic tangent (tanh) activation function.

    >>> tangent_hyperbolic(np.array([1, 5, 6, -0.67]))
    array([ 0.76159416,  0.9999092 ,  0.99998771, -0.58497988])
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
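    # Quick demonstration (illustrative input):
    print(tangent_hyperbolic(np.array([-1.0, 0.0, 1.0])))  # approx. [-0.7616, 0.0, 0.7616]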
| 36 | 0 |
def compute_ap(l):  # noqa: E741
    """Print the articulation points (cut vertices) of an undirected graph
    given as an adjacency list."""
    n = len(l)
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
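# For the adjacency list above this prints the cut vertices 2, 3 and 5:
# removing any one of them disconnects the graph.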
| 533 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
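# Example invocation (script name and paths illustrative; the TF checkpoint must exist locally):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc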
| 36 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
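# Example invocation (script name and paths illustrative; requires fairseq plus the
# sew_asapp modules registered above):
#   python convert_sew_checkpoint.py --checkpoint_path ./sew.pt \
#       --pytorch_dump_folder_path ./sew-hf --dict_path ./dict.ltr.txt --is_finetuned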
| 242 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
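# Example invocation (script name and paths illustrative):
#   python convert_longformer_qa_checkpoint_to_pytorch.py --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoint.ckpt --pytorch_dump_folder_path ./longformer-qa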
| 36 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 563 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
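# Worked example (illustrative): gold "the cat sat", prediction "cat sat down".
# After normalization the article "the" is dropped, so the gold tokens are
# ["cat", "sat"] and the prediction tokens are ["cat", "sat", "down"]:
# precision = 2/3, recall = 2/2 = 1.0, F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.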
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval["%s_%s" % (prefix, k)] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def lowercase ( __A : List[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if not qid_list:
return
snake_case : int = [na_probs[k] for k in qid_list]
snake_case : List[str] = np.ones_like(__A ) / float(len(__A ) )
plt.hist(__A , weights=__A , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(__A , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def lowercase ( __A : List[Any] , __A : Tuple , __A : Tuple , __A : Any ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
snake_case : str = num_no_ans
snake_case : Optional[Any] = cur_score
snake_case : Optional[Any] = 0.0
snake_case : List[Any] = sorted(__A , key=lambda __A : na_probs[k] )
for i, qid in enumerate(__A ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
snake_case : Dict = scores[qid]
else:
if preds[qid]:
snake_case : Dict = -1
else:
snake_case : str = 0
cur_score += diff
if cur_score > best_score:
snake_case : Union[str, Any] = cur_score
snake_case : List[Any] = na_probs[qid]
return 100.0 * best_score / len(__A ), best_thresh
def lowercase ( __A : Dict , __A : str , __A : str , __A : int , __A : str , __A : Any ) -> List[str]:
'''simple docstring'''
snake_case , snake_case : Optional[int] = find_best_thresh(__A , __A , __A , __A )
snake_case , snake_case : str = find_best_thresh(__A , __A , __A , __A )
snake_case : List[str] = best_exact
snake_case : List[Any] = exact_thresh
snake_case : Optional[Any] = best_fa
snake_case : Optional[int] = fa_thresh
def lowercase ( ) -> Any:
'''simple docstring'''
with open(OPTS.data_file ) as f:
snake_case : Dict = json.load(__A )
snake_case : Union[str, Any] = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
snake_case : int = json.load(__A )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
snake_case : Any = json.load(__A )
else:
snake_case : Any = {k: 0.0 for k in preds}
snake_case : Optional[int] = make_qid_to_has_ans(__A ) # maps qid to True/False
snake_case : Dict = [k for k, v in qid_to_has_ans.items() if v]
snake_case : Optional[int] = [k for k, v in qid_to_has_ans.items() if not v]
snake_case , snake_case : Optional[Any] = get_raw_scores(__A , __A )
snake_case : Tuple = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
snake_case : Optional[Any] = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
snake_case : Optional[int] = make_eval_dict(__A , __A )
if has_ans_qids:
snake_case : Any = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """HasAns""" )
if no_ans_qids:
snake_case : str = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(__A , __A , __A , __A , __A , __A )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__A , __A , __A , __A , __A , OPTS.out_image_dir )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(__A , __A , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(__A , __A )
else:
print(json.dumps(__A , indent=2 ) )
if __name__ == "__main__":
__lowercase : Union[str, Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 36 | 0 |
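# The evaluation excerpt above consumes compute_exact / compute_fa, which are
# defined earlier in the script with mangled names; they correspond to the
# standard SQuAD-2.0 metrics, reconstructed here as a self-contained sketch:
import collections
import re
import string


def normalize_answer(s: str) -> str:
    """Lowercase, drop punctuation and articles, collapse whitespace."""
    s = "".join(ch for ch in s.lower() if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())


def compute_exact(a_gold: str, a_pred: str) -> int:
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold: str, a_pred: str) -> float:
    gold_toks = normalize_answer(a_gold).split()
    pred_toks = normalize_answer(a_pred).split()
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if not gold_toks or not pred_toks:
        return float(gold_toks == pred_toks)  # both empty -> 1.0, else 0.0
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)


print(compute_f1("the cat sat", "cat sat down"))  # 0.8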
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]  # drop avgpool and the fc head
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # B x 3 x 224 x 224 -> B x 2048 x 7 x 7 -> B x 2048 x N -> B x N x 2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    """Pad token sequences to the batch max length and stack image/label tensors."""
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4_6_7_7_7_0_4_4, 0.4_4_5_3_1_4_2_9, 0.4_0_6_6_1_0_1_7] , std=[0.1_2_2_2_1_9_9_4, 0.1_2_1_4_5_8_3_5, 0.1_4_3_8_0_4_6_9] , ),
        ] )
| 44 |
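# A quick, hedged smoke test for the encoder above (note: it downloads
# ResNet-152 pretrained weights; `args` is stubbed with the one field read).
from types import SimpleNamespace

import torch

enc = ImageEncoder(SimpleNamespace(num_image_embeds=4))  # POOLING_BREAKDOWN[4] == (2, 2)
feats = enc(torch.randn(2, 3, 224, 224))
print(feats.shape)  # torch.Size([2, 4, 2048])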
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__lowercase : Dict = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ['''pixel_values''']
def __init__( self ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = 1 / 255 ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = size if size is not None else {"""shortest_edge""": 224}
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : Optional[Any] = do_resize
snake_case : Union[str, Any] = size
snake_case : Dict = resample
snake_case : Dict = do_rescale
snake_case : Dict = rescale_factor
snake_case : List[str] = do_center_crop
snake_case : Dict = crop_size
snake_case : Any = do_flip_channel_order
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = PIL.Image.BILINEAR ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
snake_case : List[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ ,size=size["""shortest_edge"""] ,default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE_ ,size=(size["""height"""], size["""width"""]) ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
return flip_channel_order(SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : List[Any] = do_resize if do_resize is not None else self.do_resize
snake_case : List[str] = resample if resample is not None else self.resample
snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
snake_case : Tuple = size if size is not None else self.size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else self.crop_size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
snake_case : Dict = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
snake_case : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
snake_case : Optional[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
snake_case : Dict = [self.rescale(image=SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
snake_case : Optional[int] = [self.flip_channel_order(image=SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : List[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : int = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ ,tensor_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
snake_case : int = target_sizes.numpy()
snake_case : Optional[Any] = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
snake_case : Optional[int] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE_ )
else:
snake_case : Tuple = logits.argmax(dim=1 )
snake_case : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 36 | 0 |
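# The `shortest_edge` resize in the processor above keeps the aspect ratio;
# a minimal sketch of the size computation (the helper name is ours, not the
# library's):
def shortest_edge_output_size(height: int, width: int, shortest_edge: int) -> tuple:
    """Scale so the shorter side equals shortest_edge, preserving aspect ratio."""
    if height <= width:
        return shortest_edge, int(round(width * shortest_edge / height))
    return int(round(height * shortest_edge / width)), shortest_edge


print(shortest_edge_output_size(480, 640, 224))  # (224, 299)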
'''simple docstring'''
def abbr(a: str, b: str) -> bool:
    """Return True if `a` can be abbreviated to `b` by capitalizing some of
    its lowercase letters and deleting the remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 620 |
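# Worked example for the DP above: "daBcd" -> drop 'd', capitalize 'a',
# keep 'B', capitalize 'c', drop 'd' yields "ABC"; "dBcd" cannot, since no
# lowercase 'a' is available to produce the leading 'A'.
print(abbr("daBcd", "ABC"), abbr("dBcd", "ABC"))  # True False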
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model with the architecture/config of `config_name`."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 36 | 0 |
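# Hedged usage sketch: clone t5-small's architecture into an untrained
# checkpoint (the model name and output directory are illustrative).
save_randomly_initialized_version("t5-small", "./t5-small-random")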
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
A_ : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ : str = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
for i in range(config.num_hidden_layers ):
if base_model:
A_ : Dict = """"""
else:
A_ : List[Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : Dict = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A_ : Union[str, Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ : List[str] = in_proj_weight[
: config.hidden_size, :
]
A_ : str = in_proj_bias[: config.hidden_size]
A_ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : int = in_proj_weight[
-config.hidden_size :, :
]
A_ : Optional[int] = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__A , __A )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : Optional[int] = dct.pop(__A )
A_ : Any = val
def _SCREAMING_SNAKE_CASE ( ):
A_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : List[Any] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : Optional[Any] = ViTConfig()
A_ : Dict = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A_ : Dict = True
A_ : Union[str, Any] = int(vit_name[-12:-10] )
A_ : Union[str, Any] = int(vit_name[-9:-6] )
else:
A_ : Dict = 1_000
A_ : List[Any] = """huggingface/label-files"""
A_ : str = """imagenet-1k-id2label.json"""
A_ : Optional[int] = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
A_ : Optional[int] = {int(__A ): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : Optional[Any] = {v: k for k, v in idalabel.items()}
A_ : Optional[Any] = int(vit_name[-6:-4] )
A_ : Optional[Any] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
A_ : Union[str, Any] = 192
A_ : Optional[int] = 768
A_ : Optional[int] = 12
A_ : Tuple = 3
elif vit_name[9:].startswith('''small''' ):
A_ : Tuple = 384
A_ : Any = 1_536
A_ : Any = 12
A_ : Any = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
A_ : Optional[int] = 768
A_ : str = 2_304
A_ : Optional[int] = 8
A_ : int = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
A_ : Optional[Any] = 1_024
A_ : List[str] = 4_096
A_ : List[str] = 24
A_ : Optional[Any] = 16
elif vit_name[4:].startswith('''huge''' ):
A_ : List[str] = 1_280
A_ : Dict = 5_120
A_ : Dict = 32
A_ : Optional[Any] = 16
# load original model from timm
A_ : Any = timm.create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ : Union[str, Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(__A )
A_ : List[Any] = create_rename_keys(__A , __A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_q_k_v(__A , __A , __A )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ : Optional[Any] = ViTModel(__A ).eval()
else:
A_ : Dict = ViTForImageClassification(__A ).eval()
model.load_state_dict(__A )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A_ : Dict = DeiTImageProcessor(size=config.image_size )
else:
A_ : List[Any] = ViTImageProcessor(size=config.image_size )
A_ : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
A_ : List[Any] = encoding["""pixel_values"""]
A_ : str = model(__A )
if base_model:
A_ : List[Any] = timm_model.forward_features(__A )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__A , outputs.pooler_output , atol=1e-3 )
else:
A_ : Union[str, Any] = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1e-3 )
Path(__A ).mkdir(exist_ok=__A )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__A )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__A )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 590 |
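# The conversion above slices timm's fused qkv projection into separate
# query/key/value blocks; the same slicing in isolation (function name is
# ours, not part of the conversion script):
import torch


def split_qkv(qkv_weight: torch.Tensor, qkv_bias: torch.Tensor, hidden_size: int):
    q_w, k_w, v_w = qkv_weight.split(hidden_size, dim=0)  # rows [0:H], [H:2H], [2H:3H]
    q_b, k_b, v_b = qkv_bias.split(hidden_size, dim=0)
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)


(qw, qb), _, _ = split_qkv(torch.randn(3 * 8, 8), torch.randn(3 * 8), 8)
print(qw.shape, qb.shape)  # torch.Size([8, 8]) torch.Size([8])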
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : str = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self):
        return 1e-4
| 36 | 0 |
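# Hedged usage sketch for the restored config class (values illustrative;
# the module's relative imports mean this runs inside transformers itself):
config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
print(config.num_channels, config.depth_multiplier)  # 3 0.75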
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording roots of
    even-sized subtrees in `cuts`."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
| 397 |
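# Why the -1 above: dfs(1) also appends the root (the full 10-node tree has
# even size), but the root has no parent edge to remove. For the edge list
# above the even subtrees are rooted at 3 and 6, so edges (1, 3) and (1, 6)
# can be cut and the script prints 2.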
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : List[str] = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''decision_transformer'''
__lowerCamelCase : Optional[Any] = ['''past_key_values''']
__lowerCamelCase : Tuple = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,SCREAMING_SNAKE_CASE_=17 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=128 ,SCREAMING_SNAKE_CASE_=4096 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=1024 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_="relu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=1E-5 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=50256 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_=False ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Any = state_dim
snake_case : Optional[Any] = act_dim
snake_case : Union[str, Any] = hidden_size
snake_case : Any = max_ep_len
snake_case : int = action_tanh
snake_case : Any = vocab_size
snake_case : Any = n_positions
snake_case : List[str] = n_layer
snake_case : int = n_head
snake_case : Optional[int] = n_inner
snake_case : List[Any] = activation_function
snake_case : Tuple = resid_pdrop
snake_case : Optional[Any] = embd_pdrop
snake_case : Dict = attn_pdrop
snake_case : List[str] = layer_norm_epsilon
snake_case : Union[str, Any] = initializer_range
snake_case : Optional[Any] = scale_attn_weights
snake_case : str = use_cache
snake_case : int = scale_attn_by_inverse_layer_idx
snake_case : Tuple = reorder_and_upcast_attn
snake_case : Tuple = bos_token_id
snake_case : List[str] = eos_token_id
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
| 36 | 0 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_UpperCamelCase : str = logging.getLogger(__name__)
def __UpperCAmelCase ( A : List[Any]=2 , A : List[Any]=3 , A : Optional[int]=1_6 , A : int = 1_0 , A : int = 2 ) -> Dict:
def get_dataset(A : Optional[int] ):
UpperCAmelCase_ : Union[str, Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__A , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase_ : Union[str, Any] = get_dataset(__A )
UpperCAmelCase_ : Tuple = get_dataset(__A )
UpperCAmelCase_ : Any = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
UpperCAmelCase_ : Optional[int] = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
return (train_dataloader, valid_dataloader)
def __UpperCAmelCase ( A : str , A : Optional[Any] , A : List[str] , A : int , A : Union[str, Any] , A : List[str]=None ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = []
for epoch in range(__A ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase_ : List[str] = batch
UpperCAmelCase_ : int = model(__A )
UpperCAmelCase_ : Tuple = torch.nn.functional.mse_loss(__A , __A )
accelerator.backward(__A )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class snake_case__ ( nn.Module):
def __init__( self : Optional[Any] ) -> Optional[Any]:
super().__init__()
UpperCAmelCase_ : str = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase_ : int = nn.Parameter(torch.randn(1 ) )
def A ( self : Dict , _A : int ) -> Tuple:
return x * self.a + self.b
class snake_case__ ( unittest.TestCase):
def A ( self : Tuple ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ : Dict = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
UpperCAmelCase_ : Any = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Dict = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A ( self : int ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : List[str] = DummyModel()
UpperCAmelCase_ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ : str = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : str = Accelerator()
UpperCAmelCase_ : Optional[int] = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
UpperCAmelCase_ : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
(UpperCAmelCase_) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
(UpperCAmelCase_) : Any = model.a.item(), model.b.item()
UpperCAmelCase_ : List[str] = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCAmelCase_ : List[str] = DummyModel()
UpperCAmelCase_ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : List[Any] = Accelerator()
UpperCAmelCase_ : List[str] = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
(UpperCAmelCase_) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Optional[Any] = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
UpperCAmelCase_ : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
(UpperCAmelCase_) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : int = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A ( self : List[Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : str = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ : int = dummy_dataloaders()
UpperCAmelCase_ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
UpperCAmelCase_ : Optional[Any] = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Any = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
(UpperCAmelCase_) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
UpperCAmelCase_ : Any = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
(UpperCAmelCase_) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : str = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ : Optional[Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : List[str] = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : str = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
(UpperCAmelCase_) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : str = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
(UpperCAmelCase_) : Dict = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A ( self : Optional[int] ) -> str:
UpperCAmelCase_ : str = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Optional[Any] = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : List[str] = DummyModel()
UpperCAmelCase_ : Union[str, Any] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Tuple = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def A ( self : Optional[Any] ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ : Tuple = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.99 )
UpperCAmelCase_ : Optional[Any] = dummy_dataloaders()
UpperCAmelCase_ : int = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Tuple = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Optional[int] = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def A ( self : Dict ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : List[Any] = DummyModel()
UpperCAmelCase_ : int = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
UpperCAmelCase_ : List[str] = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : str = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def A ( self : List[Any] ) -> List[Any]:
UpperCAmelCase_ : List[str] = ["""torchrun""", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
_UpperCamelCase : int = '''/tmp/accelerate/state_checkpointing'''
_UpperCamelCase : Optional[int] = DummyModel()
_UpperCamelCase : Optional[int] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
_UpperCamelCase : Tuple = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_UpperCamelCase : List[str] = dummy_dataloaders()
_UpperCamelCase : Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_UpperCamelCase : Dict = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_UpperCamelCase : Optional[Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_UpperCamelCase : Optional[int] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_UpperCamelCase : Optional[Any] = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
_UpperCamelCase : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_UpperCamelCase : int = group['''params'''][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_UpperCamelCase : Any = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 541 |
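# The core round-trip those tests exercise, in miniature (real accelerate
# API; the "ckpt" directory name is illustrative):
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("ckpt")                  # snapshot model/optimizer/RNG state
loss = model(torch.randn(8, 1)).pow(2).mean()
accelerator.backward(loss)
optimizer.step()
accelerator.load_state("ckpt")                  # roll back to the snapshot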
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 36 | 0 |
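# The guarded-import pattern above in miniature: probe for an optional
# backend once, then gate anything that needs it (helper name is ours):
try:
    import scipy  # noqa: F401

    _scipy_available = True
except ImportError:
    _scipy_available = False


def require_scipy() -> None:
    if not _scipy_available:
        raise ImportError("This feature requires `pip install scipy`.")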
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
lowerCAmelCase_ : Tuple = logging.get_logger(__name__)
lowerCAmelCase_ : Union[str, Any] = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class SCREAMING_SNAKE_CASE ( snake_case_ ):
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self : Any , lowercase__ : List[str] , lowercase__ : List[Any] , **lowercase__ : Any ):
'''simple docstring'''
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self : str , lowercase__ : Optional[Any] , lowercase__ : List[Any] = None ):
'''simple docstring'''
a_ : List[Any] = max_length
a_ : Union[str, Any] = max_position_embeddings
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self : Any , lowercase__ : str , lowercase__ : Dict , **lowercase__ : Optional[int] ):
'''simple docstring'''
a_ : List[str] = input_ids.shape[-1]
a_ : List[Any] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model's predefined """
F"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self : int , lowercase__ : Optional[int] , lowercase__ : Optional[Any] ):
'''simple docstring'''
warnings.warn(
"""The class `MaxNewTokensCriteria` is deprecated. """
F"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
"""with `max_length = start_length + max_new_tokens` instead.""" , SCREAMING_SNAKE_CASE_ , )
a_ : List[str] = start_length
a_ : str = max_new_tokens
a_ : Union[str, Any] = start_length + max_new_tokens
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self : str , lowercase__ : str , lowercase__ : Dict , **lowercase__ : Any ):
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self : List[str] , lowercase__ : Union[str, Any] , lowercase__ : List[str] = None ):
'''simple docstring'''
a_ : Any = max_time
a_ : Dict = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self : str , lowercase__ : Optional[int] , lowercase__ : Dict , **lowercase__ : Union[str, Any] ):
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class SCREAMING_SNAKE_CASE ( snake_case_ ):
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self : Optional[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] , **lowercase__ : Dict ):
'''simple docstring'''
return any(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for criteria in self )
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
for stopping_criterium in self:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return stopping_criterium.max_length
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return stopping_criterium.max_length
return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
| 442 |
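# Hedged usage sketch with the public transformers classes this excerpt
# mirrors (the model choice is illustrative):
from transformers import AutoModelForCausalLM, AutoTokenizer, MaxLengthCriteria, StoppingCriteriaList

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok("Stopping criteria decide when", return_tensors="pt")
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=16)])
out = model.generate(**inputs, stopping_criteria=criteria, do_sample=False)
print(tok.decode(out[0]))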
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 36 | 0 |
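# The card generator above is plain f-string templating; the same idea in
# miniature (render_header and its fields are illustrative, not upstream API):
def render_header(src_lang: str, tgt_lang: str) -> str:
    return f"---\nlanguage:\n- {src_lang}\n- {tgt_lang}\ntags:\n- translation\n---\n"


print(render_header("en", "ru"))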
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowerCAmelCase = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = '''cpu'''
__lowerCAmelCase = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__lowerCAmelCase = '''path-to-your-trained-model'''
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowerCAmelCase = pipe.to(device)
# to channels last
__lowerCAmelCase = pipe.unet.to(memory_format=torch.channels_last)
__lowerCAmelCase = pipe.vae.to(memory_format=torch.channels_last)
__lowerCAmelCase = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowerCAmelCase = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowerCAmelCase = torch.randn(2, 4, 6_4, 6_4)
__lowerCAmelCase = torch.rand(1) * 9_9_9
__lowerCAmelCase = torch.randn(2, 7_7, 7_6_8)
__lowerCAmelCase = (sample, timestep, encoder_hidden_status)
try:
__lowerCAmelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__lowerCAmelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__lowerCAmelCase = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__lowerCAmelCase = 6_6_6
__lowerCAmelCase = torch.Generator(device).manual_seed(seed)
__lowerCAmelCase = {'''generator''': generator}
if args.steps is not None:
__lowerCAmelCase = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__lowerCAmelCase = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
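# A self-contained sketch of the `ipex.optimize` pattern used above, applied to a toy module so
# the mechanics are visible in isolation. `ToyNet` is hypothetical; the `torch`/`ipex` imports at
# the top of this script are reused.
#
# class ToyNet(torch.nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.linear = torch.nn.Linear(64, 64)
#
#     def forward(self, x):
#         return self.linear(x)
#
# net = ipex.optimize(ToyNet().eval(), dtype=torch.bfloat16, inplace=True)
# with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
#     _ = net(torch.randn(1, 64))  # weights are bf16-prepacked; autocast handles activation casts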
| 147 |
__lowercase : List[str] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowercase : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowercase : List[str] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 36 | 0 |
import math
import random
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowerCAmelCase = 0.0_2
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = float(2 * (random.randint(1 , 1_00 )) - 1 )
for _ in range(__A ):
# Forward propagation
lowercase__ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
lowercase__ = (expected / 1_00) - layer_a
# Error delta
lowercase__ = layer_1_error * sigmoid_function(__A , __A )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = int(input('Expected value: '))
lowerCAmelCase = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
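# Numerical sanity check (sketch) for the identity behind the `deriv` branch above:
# d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)). We compare a central finite difference
# against the closed form at x = 0.5; `math` is already imported at the top of this file.
_x, _h = 0.5, 1e-6
_s = lambda v: 1 / (1 + math.exp(-v))
_numeric = (_s(_x + _h) - _s(_x - _h)) / (2 * _h)  # central-difference derivative
_analytic = _s(_x) * (1 - _s(_x))                  # closed-form derivative
assert abs(_numeric - _analytic) < 1e-6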
| 43 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowercase : str = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" ,SCREAMING_SNAKE_CASE_ ,)
super().__init__(args=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
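# The warning above points users at the plain `Trainer`; the replacement is a direct swap
# (sketch; `model`, `training_args`, and `train_dataset` are placeholders):
#
# from transformers import Trainer
#
# trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)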
| 36 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase__ ( _snake_case ):
'''simple docstring'''
A_ : Optional[Any] = ['''image_processor''', '''tokenizer''']
A_ : int = '''CLIPImageProcessor'''
A_ : Optional[Any] = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self , __snake_case=None , __snake_case=None , **__snake_case ):
_SCREAMING_SNAKE_CASE : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , SCREAMING_SNAKE_CASE_ , )
_SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop("""feature_extractor""" )
_SCREAMING_SNAKE_CASE : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if images is not None:
_SCREAMING_SNAKE_CASE : int = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if text is not None and images is not None:
_SCREAMING_SNAKE_CASE : Dict = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_ ) , tensor_type=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( self , *__snake_case , **__snake_case ):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( self , *__snake_case , **__snake_case ):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
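# Usage sketch for a processor like the one above (the XLMRoberta-tokenizer + CLIP-image-processor
# pairing matches AltCLIP; the checkpoint id and network access are assumptions):
#
# from transformers import AltCLIPProcessor
# from PIL import Image
#
# processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
# inputs = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
# inputs.keys()  # input_ids / attention_mask from the tokenizer, pixel_values from the image processor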
| 533 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowercase : List[str] = ['''text''', '''image''', '''audio''']
def lowercase ( __A : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[int] = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__A , __A ):
inputs.append(create_inputs(__A ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def lowercase ( __A : List ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = []
for output in outputs:
if isinstance(__A , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(__A , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(__A , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _A :
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
snake_case : Dict = self.tool.inputs
for _input in inputs:
if isinstance(_input ,SCREAMING_SNAKE_CASE_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : str = self.tool(*SCREAMING_SNAKE_CASE_ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : Union[str, Any] = [outputs]
self.assertListEqual(output_types(SCREAMING_SNAKE_CASE_ ) ,self.tool.outputs )
def snake_case_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
for output, output_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.outputs ):
snake_case : List[str] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = create_inputs(self.tool.inputs )
snake_case : Any = []
for _input, input_type in zip(SCREAMING_SNAKE_CASE_ ,self.tool.inputs ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Tuple = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Union[str, Any] = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(self.tool.outputs ) )
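# Sketch of how the mixin above is consumed: a concrete test case provides `self.tool` in setUp
# and inherits all of the generic input/output checks. Upstream the mixin is named
# `ToolTesterMixin`; `load_tool` is the public transformers helper, and the tool name is an
# assumption.
#
# import unittest
# from transformers import load_tool
#
# class TranslationToolTester(unittest.TestCase, ToolTesterMixin):
#     def setUp(self):
#         self.tool = load_tool("translation")
#         self.tool.setup()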
| 36 | 0 |
def UpperCamelCase_( _snake_case : Tuple , _snake_case : Optional[int] ):
"""simple docstring"""
__a =[1]
for i in range(2 , __A ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
__a =[]
__a =list(range(__A ) )
# Find permutation
while factorials:
__a =factorials.pop()
__a =divmod(__A , __A )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
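# Self-contained restatement (sketch, readable names) of the factorial-number-system routine
# above, plus a worked example: with n = 4 the factorial list is [1, 2, 6], and k = 10 decomposes
# as 10 = 1*6 + 2*2 + 0*1, selecting indices 1, 2, 0 from the shrinking element list.
def _kth_permutation(k: int, n: int) -> list:
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation, elements = [], list(range(n))
    while factorials:
        number, k = divmod(k, factorials.pop())  # quotient picks the element, remainder carries on
        permutation.append(elements.pop(number))
    permutation.append(elements[0])
    return permutation

assert _kth_permutation(10, 4) == [1, 3, 0, 2]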
| 242 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowercase : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase ( __A : Optional[Any] , __A : Optional[Any] ) -> str:
'''simple docstring'''
inspect_dataset(__A , __A )
snake_case : List[str] = path + """.py"""
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase ( __A : Optional[int] , __A : Any ) -> Optional[Any]:
'''simple docstring'''
inspect_metric(__A , __A )
snake_case : Any = path + """.py"""
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Tuple , __A : Dict , __A : Any ) -> Optional[int]:
'''simple docstring'''
snake_case : List[str] = get_dataset_config_info(__A , config_name=__A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( __A : Tuple , __A : Any , __A : List[str] ) -> Optional[int]:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_config_info(__A , config_name=__A )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase ( __A : Any , __A : Dict ) -> Dict:
'''simple docstring'''
snake_case : int = get_dataset_config_names(__A )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Optional[Any] , __A : Dict , __A : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[Any] = get_dataset_infos(__A )
assert list(infos.keys() ) == expected_configs
snake_case : Any = expected_configs[0]
assert expected_config in infos
snake_case : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( __A : Optional[int] , __A : Tuple , __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = get_dataset_infos(__A )
assert expected_config in infos
snake_case : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( __A : Optional[int] , __A : Any , __A : Dict ) -> int:
'''simple docstring'''
with pytest.raises(__A ):
get_dataset_split_names(__A , config_name=__A )
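# The APIs exercised above can also be called directly (sketch; requires network access):
#
# from datasets import get_dataset_config_names, get_dataset_split_names
#
# get_dataset_config_names("squad")               # -> ["plain_text"]
# get_dataset_split_names("squad", "plain_text")  # -> ["train", "validation"]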
| 36 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : Dict = ''''''
_lowercase : Union[str, Any] = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(self , **SCREAMING_SNAKE_CASE_ )
a__ : List[Any] =repo_info
a__ : Dict =token
a__ : Any =None
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
if self.dir_cache is None:
a__ : str ={}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
a__ : Union[str, Any] ={
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(SCREAMING_SNAKE_CASE_ ): {"name": str(SCREAMING_SNAKE_CASE_ ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = "rb" , **lowerCAmelCase__ , ) -> List[str]:
'''simple docstring'''
if not isinstance(self.repo_info , SCREAMING_SNAKE_CASE_ ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
a__ : Tuple =hf_hub_url(self.repo_info.id , SCREAMING_SNAKE_CASE_ , revision=self.repo_info.sha )
return fsspec.open(
SCREAMING_SNAKE_CASE_ , mode=SCREAMING_SNAKE_CASE_ , headers=get_authentication_headers_for_url(SCREAMING_SNAKE_CASE_ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def _lowercase ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
self._get_dirs()
a__ : List[Any] =self._strip_protocol(SCREAMING_SNAKE_CASE_ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
self._get_dirs()
a__ : List[str] =PurePosixPath(path.strip("/" ) )
a__ : Optional[int] ={}
for p, f in self.dir_cache.items():
a__ : List[str] =PurePosixPath(p.strip("/" ) )
a__ : int =p.parent
if root == path:
a__ : Any =f
a__ : Optional[Any] =list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
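# Driving sketch for the legacy filesystem above (upstream name: HfFileSystem). A populated
# `DatasetInfo` is assumed; the file path is hypothetical.
#
# fs = HfFileSystem(repo_info=dataset_info, token=None)
# fs.ls("", detail=False)                          # top-level names built from repo_info.siblings
# with fs.open("data/train.csv", mode="rb") as f:  # streamed via hf_hub_url + fsspec
#     head = f.read(100)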
| 563 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowercase : Optional[Any] = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''albert'''
def __init__( self ,SCREAMING_SNAKE_CASE_=30000 ,SCREAMING_SNAKE_CASE_=128 ,SCREAMING_SNAKE_CASE_=4096 ,SCREAMING_SNAKE_CASE_=12 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_=64 ,SCREAMING_SNAKE_CASE_=16384 ,SCREAMING_SNAKE_CASE_=1 ,SCREAMING_SNAKE_CASE_="gelu_new" ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=512 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=1E-12 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_="absolute" ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=3 ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ ,bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = vocab_size
snake_case : int = embedding_size
snake_case : int = hidden_size
snake_case : List[Any] = num_hidden_layers
snake_case : int = num_hidden_groups
snake_case : List[str] = num_attention_heads
snake_case : List[str] = inner_group_num
snake_case : Any = hidden_act
snake_case : Any = intermediate_size
snake_case : Union[str, Any] = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : Tuple = max_position_embeddings
snake_case : Any = type_vocab_size
snake_case : Optional[Any] = initializer_range
snake_case : int = layer_norm_eps
snake_case : Optional[int] = classifier_dropout_prob
snake_case : str = position_embedding_type
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
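# Offline usage sketch for the configuration above: instantiate it directly, or pair it with the
# ONNX config to inspect the exported input axes (class names per upstream transformers):
#
# from transformers import AlbertConfig
# from transformers.models.albert.configuration_albert import AlbertOnnxConfig
#
# config = AlbertConfig()  # library defaults
# onnx_config = AlbertOnnxConfig(config, task="sequence-classification")
# list(onnx_config.inputs)  # -> ["input_ids", "attention_mask", "token_type_ids"]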
| 36 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class UpperCAmelCase__ :
def __init__( self : str,__A : Any,__A : str=1_3,__A : List[Any]=7,__A : Optional[int]=True,__A : Any=True,__A : Optional[Any]=True,__A : int=True,__A : Dict=9_9,__A : List[Any]=3_2,__A : Dict=2,__A : Optional[Any]=4,__A : Tuple=3_7,__A : Optional[Any]="gelu",__A : Tuple=0.1,__A : Union[str, Any]=0.1,__A : Union[str, Any]=5_1_2,__A : Union[str, Any]=1_6,__A : Tuple=2,__A : Any=0.02,__A : Any=False,__A : int=True,__A : Optional[Any]="None",__A : Optional[int]=3,__A : Optional[Any]=4,__A : Any=None,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Dict = batch_size
_lowerCamelCase : Dict = seq_length
_lowerCamelCase : Optional[Any] = is_training
_lowerCamelCase : Optional[Any] = use_input_mask
_lowerCamelCase : Any = use_token_type_ids
_lowerCamelCase : str = use_labels
_lowerCamelCase : int = vocab_size
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : Optional[int] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : Union[str, Any] = max_position_embeddings
_lowerCamelCase : Tuple = type_vocab_size
_lowerCamelCase : Optional[Any] = type_sequence_label_size
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : Tuple = num_labels
_lowerCamelCase : int = num_choices
_lowerCamelCase : List[Any] = relative_attention
_lowerCamelCase : int = position_biased_input
_lowerCamelCase : Any = pos_att_type
_lowerCamelCase : Optional[int] = scope
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : List[Any] = None
if self.use_input_mask:
_lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Any = None
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : str = None
if self.use_labels:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
_lowerCamelCase : int = DebertaVaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,relative_attention=self.relative_attention,position_biased_input=self.position_biased_input,initializer_range=self.initializer_range,return_dict=SCREAMING_SNAKE_CASE_,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : int,__A : List[Any],__A : Optional[Any],__A : Optional[Any],__A : Optional[int],__A : List[Any],__A : Any,__A : Tuple ):
_lowerCamelCase : Optional[Any] = TFDebertaVaModel(config=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCamelCase : Dict = [input_ids, input_mask]
_lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Dict,__A : List[Any],__A : Dict,__A : Optional[Any],__A : Dict ):
_lowerCamelCase : Any = TFDebertaVaForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Optional[int],__A : List[Any],__A : Any,__A : str,__A : Union[str, Any],__A : Optional[Any],__A : Dict,__A : Union[str, Any] ):
_lowerCamelCase : Any = self.num_labels
_lowerCamelCase : Any = TFDebertaVaForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : List[str] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Tuple,__A : List[Any],__A : Tuple,__A : Optional[Any],__A : Optional[Any],__A : Union[str, Any],__A : List[Any],__A : List[str] ):
_lowerCamelCase : Tuple = self.num_labels
_lowerCamelCase : Union[str, Any] = TFDebertaVaForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Optional[int] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : Optional[int],__A : Any,__A : List[str],__A : int,__A : Optional[Any],__A : int,__A : Tuple,__A : Tuple ):
_lowerCamelCase : Dict = TFDebertaVaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
(
_lowerCamelCase
) : Optional[Any] = config_and_inputs
_lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = TFDebertaVaModelTester(self )
_lowerCamelCase : Dict = ConfigTester(self,config_class=SCREAMING_SNAKE_CASE_,hidden_size=3_7 )
def lowerCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Any = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def lowerCamelCase_ ( self : List[str] ):
pass
@slow
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Dict = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
_lowerCamelCase : Dict = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Tuple = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_,attention_mask=SCREAMING_SNAKE_CASE_ )[0]
_lowerCamelCase : Optional[Any] = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4],SCREAMING_SNAKE_CASE_,atol=1e-4 )
| 44 |
from __future__ import annotations
def lowercase ( __A : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(__A ) / len(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
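# Quick sanity check (sketch) for the mean helper above: the arithmetic mean of [3, 6, 9] is 6.0,
# and an empty list raises ValueError. The obfuscated def name `lowercase` is reused here.
assert lowercase([3.0, 6.0, 9.0]) == 6.0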
| 36 | 0 |
'''simple docstring'''
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = len(__A )
# We need to create solution object to save path.
lowercase_ : Union[str, Any] = [[0 for _ in range(__A )] for _ in range(__A )]
lowercase_ : str = run_maze(__A , 0 , 0 , __A )
if solved:
print("\n".join(str(__A ) for row in solutions ) )
else:
print("No solution exists!" )
return solved
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = len(__A )
# Final check point.
if i == j == (size - 1):
lowercase_ : str = 1
return True
lowercase_ : str = (not i < 0) and (not j < 0) # Check lower bounds
lowercase_ : Union[str, Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
lowercase_ : Optional[int] = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
lowercase_ : List[str] = 1
# check for directions
if (
run_maze(__A , i + 1 , __A , __A )
or run_maze(__A , __A , j + 1 , __A )
or run_maze(__A , i - 1 , __A , __A )
or run_maze(__A , __A , j - 1 , __A )
):
return True
lowercase_ : Optional[int] = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
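# Self-contained demo (sketch, readable names) of the backtracking walk above, on a 4x4 maze
# where 0 is an open cell and 1 is a wall; the path grid ends up marking the visited route from
# (0, 0) to (3, 3).
def _solve(maze, i, j, path):
    n = len(maze)
    if i == j == n - 1:
        path[i][j] = 1
        return True
    if 0 <= i < n and 0 <= j < n and not path[i][j] and not maze[i][j]:
        path[i][j] = 1  # mark as visited
        if (_solve(maze, i + 1, j, path) or _solve(maze, i, j + 1, path)
                or _solve(maze, i - 1, j, path) or _solve(maze, i, j - 1, path)):
            return True
        path[i][j] = 0  # dead end: backtrack
    return False

_maze = [[0, 1, 0, 0], [0, 0, 0, 1], [1, 0, 1, 0], [1, 0, 0, 0]]
_path = [[0] * 4 for _ in range(4)]
assert _solve(_maze, 0, 0, _path)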
| 620 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowercase : List[str] = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Tuple = '''vision-encoder-decoder'''
__lowerCamelCase : List[Any] = True
def __init__( self ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"""A configuraton of type {self.model_type} cannot be instantiated because """
F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
snake_case : Union[str, Any] = kwargs.pop("""encoder""" )
snake_case : Any = encoder_config.pop("""model_type""" )
snake_case : Optional[Any] = kwargs.pop("""decoder""" )
snake_case : Union[str, Any] = decoder_config.pop("""model_type""" )
snake_case : Any = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : int = True
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
snake_case : Tuple = True
snake_case : Union[str, Any] = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = copy.deepcopy(self.__dict__ )
snake_case : Union[str, Any] = self.encoder.to_dict()
snake_case : Union[str, Any] = self.decoder.to_dict()
snake_case : Dict = self.__class__.model_type
return output
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case_ ( self ):
'''simple docstring'''
return 1E-4
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = OrderedDict()
snake_case : Optional[int] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
snake_case : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
snake_case : Optional[Any] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = None ,):
'''simple docstring'''
import torch
snake_case : Optional[Any] = OrderedDict()
snake_case : Tuple = super().generate_dummy_inputs(
SCREAMING_SNAKE_CASE_ ,batch_size=SCREAMING_SNAKE_CASE_ ,seq_length=SCREAMING_SNAKE_CASE_ ,is_pair=SCREAMING_SNAKE_CASE_ ,framework=SCREAMING_SNAKE_CASE_ )
snake_case , snake_case : List[Any] = dummy_input["""input_ids"""].shape
snake_case : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
snake_case : List[str] = dummy_input.pop("""input_ids""" )
snake_case : int = dummy_input.pop("""attention_mask""" )
snake_case : Dict = torch.zeros(SCREAMING_SNAKE_CASE_ )
return common_inputs
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "default" ):
'''simple docstring'''
snake_case : int = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
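# Composition sketch for the config above (offline; class names per upstream transformers):
#
# from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig
#
# config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
# (config.decoder.is_decoder, config.decoder.add_cross_attention)  # -> (True, True)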
| 36 | 0 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
UpperCamelCase = get_logger(__name__)
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE = None )->Any:
'''simple docstring'''
A_ : Optional[int] = (
os.path.join(SCREAMING_SNAKE_CASE_ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
A_ : Union[str, Any] = Extractor
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
A_ : str = os.path.abspath(SCREAMING_SNAKE_CASE_ )
return os.path.join(self.extract_dir , hash_url_to_filename(SCREAMING_SNAKE_CASE_ ) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Any:
'''simple docstring'''
return force_extract or (
not os.path.isfile(SCREAMING_SNAKE_CASE_ ) and not (os.path.isdir(SCREAMING_SNAKE_CASE_ ) and os.listdir(SCREAMING_SNAKE_CASE_ ))
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False )->str:
'''simple docstring'''
A_ : Optional[Any] = self.extractor.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if not extractor_format:
return input_path
A_ : int = self._get_output_path(SCREAMING_SNAKE_CASE_ )
if self._do_extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.extractor.extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return output_path
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
@classmethod
@abstractmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->List[Any]:
'''simple docstring'''
...
class _lowerCamelCase ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
snake_case = []
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Dict:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f:
return f.read(SCREAMING_SNAKE_CASE_ )
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = b"" )->List[Any]:
'''simple docstring'''
if not magic_number:
A_ : str = max(len(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
try:
A_ : Dict = cls.read_magic_number(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except OSError:
return False
return any(magic_number.startswith(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
return tarfile.is_tarfile(SCREAMING_SNAKE_CASE_ )
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
def resolved(_SCREAMING_SNAKE_CASE ) -> str:
return os.path.realpath(os.path.abspath(SCREAMING_SNAKE_CASE_ ) )
def badpath(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ).startswith(SCREAMING_SNAKE_CASE_ )
def badlink(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
# Links are interpreted relative to the directory containing the link
A_ : Optional[Any] = resolved(os.path.join(SCREAMING_SNAKE_CASE_ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=SCREAMING_SNAKE_CASE_ )
A_ : Dict = resolved(SCREAMING_SNAKE_CASE_ )
for finfo in members:
if badpath(finfo.name , SCREAMING_SNAKE_CASE_ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
A_ : List[str] = tarfile.open(SCREAMING_SNAKE_CASE_ )
tar_file.extractall(SCREAMING_SNAKE_CASE_ , members=TarExtractor.safemembers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
tar_file.close()
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = [B'''\x1F\x8B''']
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Optional[Any]:
'''simple docstring'''
with gzip.open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as gzip_file:
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = [
B'''PK\x03\x04''',
B'''PK\x05\x06''', # empty archive
B'''PK\x07\x08''', # spanned archive
]
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = b"" )->Optional[Any]:
'''simple docstring'''
if super().is_extractable(SCREAMING_SNAKE_CASE_ , magic_number=SCREAMING_SNAKE_CASE_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as fp:
A_ : List[Any] = _EndRecData(SCREAMING_SNAKE_CASE_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
A_ : List[Any] = fp.read(SCREAMING_SNAKE_CASE_ ) # CD is where we expect it to be
if len(SCREAMING_SNAKE_CASE_ ) == sizeCentralDir:
A_ : str = struct.unpack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , '''r''' ) as zip_file:
zip_file.extractall(SCREAMING_SNAKE_CASE_ )
zip_file.close()
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = [B'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Dict:
'''simple docstring'''
with lzma.open(SCREAMING_SNAKE_CASE_ ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Optional[Any]:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
A_ : Optional[Any] = rarfile.RarFile(SCREAMING_SNAKE_CASE_ )
rf.extractall(SCREAMING_SNAKE_CASE_ )
rf.close()
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = [B'''\x28\xb5\x2F\xFD''']
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
A_ : Any = zstd.ZstdDecompressor()
with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as ifh, open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as ofh:
dctx.copy_stream(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = [B'''\x42\x5A\x68''']
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
with bza.open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = [B'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Optional[Any]:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import pyazr
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with pyazr.SevenZipFile(SCREAMING_SNAKE_CASE_ , '''r''' ) as archive:
archive.extractall(SCREAMING_SNAKE_CASE_ )
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = [B'''\x04\x22\x4D\x18''']
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lza.frame
with lza.frame.open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class _lowerCamelCase :
"""simple docstring"""
snake_case = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _snake_case ( cls )->int:
'''simple docstring'''
return max(
len(SCREAMING_SNAKE_CASE_ )
for extractor in cls.extractors.values()
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(SCREAMING_SNAKE_CASE_ , magic_number_length=SCREAMING_SNAKE_CASE_ )
except OSError:
return b""
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False )->Any:
'''simple docstring'''
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=SCREAMING_SNAKE_CASE_ , )
A_ : Union[str, Any] = cls.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE )->Tuple: # <Added version="2.4.0"/>
'''simple docstring'''
A_ : str = cls._get_magic_number_max_length()
A_ : Optional[Any] = cls._read_magic_number(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(SCREAMING_SNAKE_CASE_ , magic_number=SCREAMING_SNAKE_CASE_ ):
return extractor_format
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "deprecated" , )->int:
'''simple docstring'''
os.makedirs(os.path.dirname(SCREAMING_SNAKE_CASE_ ) , exist_ok=SCREAMING_SNAKE_CASE_ )
# Prevent parallel extractions
A_ : str = str(Path(SCREAMING_SNAKE_CASE_ ).with_suffix('''.lock''' ) )
with FileLock(SCREAMING_SNAKE_CASE_ ):
shutil.rmtree(SCREAMING_SNAKE_CASE_ , ignore_errors=SCREAMING_SNAKE_CASE_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=SCREAMING_SNAKE_CASE_ , )
A_ : Dict = extractor if extractor != """deprecated""" else extractor_format
else:
A_ : Optional[Any] = cls.extractors[extractor_format]
return extractor.extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=SCREAMING_SNAKE_CASE_ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(SCREAMING_SNAKE_CASE_ ):
return extractor.extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
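# Entry-point sketch for the module above (upstream these classes are named ExtractManager and
# Extractor; the archive path is hypothetical):
#
# fmt = Extractor.infer_extractor_format("archive.tar.gz")   # e.g. "gzip", via magic numbers
# Extractor.extract("archive.tar.gz", "out/archive", extractor_format=fmt)
# # or, cache-aware, re-extracting only when needed:
# # ExtractManager(cache_dir="~/.cache").extract("archive.tar.gz")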
| 590 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Any = logging.get_logger(__name__)
def lowercase ( __A : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Dict = """huggingface/label-files"""
snake_case : int = """imagenet-1k-id2label.json"""
snake_case : Tuple = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
snake_case : Any = {int(__A ): v for k, v in idalabel.items()}
snake_case : Dict = {v: k for k, v in idalabel.items()}
snake_case : Any = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
snake_case : List[Any] = BitConfig(
conv_layer=__A , num_labels=1000 , idalabel=__A , labelaid=__A , )
return config
def lowercase ( __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "stem.conv" in name:
snake_case : List[str] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
snake_case : List[str] = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
snake_case : Optional[int] = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
snake_case : Optional[Any] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
snake_case : Tuple = """bit.encoder.""" + name
return name
def lowercase ( ) -> Optional[int]:
'''simple docstring'''
snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case : Optional[Any] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowercase ( __A : Any , __A : Union[str, Any] , __A : str=False ) -> Optional[int]:
'''simple docstring'''
snake_case : str = get_config(__A )
# load original model from timm
snake_case : Tuple = create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model
snake_case : List[str] = timm_model.state_dict()
for key in state_dict.copy().keys():
snake_case : List[Any] = state_dict.pop(__A )
snake_case : Union[str, Any] = val.squeeze() if """head""" in key else val
# load HuggingFace model
snake_case : List[Any] = BitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# create image processor
snake_case : Dict = create_transform(**resolve_data_config({} , model=__A ) )
snake_case : Optional[Any] = transform.transforms
snake_case : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case : Union[str, Any] = BitImageProcessor(
do_resize=__A , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__A , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case : Dict = prepare_img()
snake_case : List[str] = transform(__A ).unsqueeze(0 )
snake_case : int = processor(__A , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__A , __A )
# verify logits
with torch.no_grad():
snake_case : Optional[int] = model(__A )
snake_case : Dict = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
snake_case : int = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
__lowercase : Union[str, Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
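# Example invocation of the conversion script above, using the argparse flags it defines
# (sketch; the script file name is an assumption):
#
# python convert_bit_to_pytorch.py \
#     --model_name resnetv2_50x1_bitm \
#     --pytorch_dump_folder_path ./bit-50 \
#     --push_to_hub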
| 36 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase:
"""simple docstring"""
def __init__( self , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=64 , lowerCamelCase=None ) -> int:
"""simple docstring"""
lowercase__ : List[Any] = np.random.default_rng(SCREAMING_SNAKE_CASE_ )
lowercase__ : Dict = length
lowercase__ : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
lowercase__ : Dict = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> int:
"""simple docstring"""
return self.length
def __getitem__( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class UpperCAmelCase( torch.nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=False ) -> Tuple:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowercase__ : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowercase__ : Dict = True
def __a ( self , lowerCamelCase=None ) -> int:
"""simple docstring"""
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowercase__ : Tuple = False
return x * self.a[0] + self.b[0]
class UpperCAmelCase( torch.nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=False ) -> int:
"""simple docstring"""
super().__init__()
lowercase__ : Tuple = torch.nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE_ ).float() )
lowercase__ : Optional[Any] = torch.nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE_ ).float() )
lowercase__ : Dict = True
def __a ( self , lowerCamelCase=None ) -> Dict:
"""simple docstring"""
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowercase__ : int = False
return x * self.a + self.b
def get_dataloaders( accelerator , batch_size = 16 ):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv" , data_files=data_files )
    label_list = datasets["train"].unique("label" )
    label_to_id = {v: i for i, v in enumerate(label_list )}

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None , padding="max_length" )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["sentence1", "sentence2", "label"] , )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=128 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
| 397 |
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
    def metric_definitions( self ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name( self ):
'''simple docstring'''
return F"""{self.framework}-transfromers-test"""
@property
    def test_path( self ):
'''simple docstring'''
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
    def image_uri( self ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def sm_env( request ):
    # Attach the environment to the requesting test class so tests can use `self.env`.
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
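# Illustrative only: a test class would consume the fixture above roughly like
# this (the class name, test name, and assertion are assumptions for the sketch).
@pytest.mark.usefixtures("sm_env" )
class ExampleSageMakerTest:
    framework = "pytorch"  # read by the fixture through request.cls.framework

    def test_metric_definitions(self ):
        # `self.env` is attached by the `sm_env` fixture.
        assert self.env.metric_definitions[0]["Name"] == "train_runtime"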
| 36 | 0 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is required by ctypes.Structure to describe the C struct layout.
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("\033[?25l" )
        sys.stdout.flush()
def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("\033[?25h" )
        sys.stdout.flush()
@contextmanager
def hide():
try:
hide_cursor()
yield
finally:
show_cursor()
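# Example use of the context manager above (a sketch; the helper name is
# illustrative): the cursor stays hidden for the duration of the block and is
# restored even if the block raises.
def _example_hide_cursor():
    with hide():
        print("cursor is hidden while this runs" )
    print("cursor is visible again" )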
| 541 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 36 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting( DiffusionPipeline ):
    def __init__( self , segmentation_model , segmentation_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
        super().__init__()
        if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1" , "1.0.0" , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config )
        if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set" , "1.0.0" , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config )
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload( self ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device("cuda" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    def __call__( self , prompt , image , text , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        # Segment the region described by `text` with CLIPSeg to build the inpainting mask.
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
| 442 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset( Dataset ):
    features: List[InputFeatures]
    def __init__( self , data_dir , tokenizer , task , max_seq_length = None , overwrite_cache=False , evaluate = False , ):
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir , "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}" )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(f"Creating features from dataset file at {data_dir}" )
                examples = (
                    processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                )
                logger.info("Training examples: %s" , len(examples ) )
                self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                logger.info("Saving features into cached file %s" , cached_features_file )
                torch.save(self.features , cached_features_file )
    def __len__( self ):
        return len(self.features )
    def __getitem__( self , i ):
        return self.features[i]
    def get_labels( self ):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    features: List[InputFeatures]
    def __init__( self , data_dir , tokenizer , task , max_seq_length = 128 , overwrite_cache=False , evaluate = False , ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
        self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
                if ex_index % 10000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(examples )) )
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )
        self.dataset = tf.data.Dataset.from_generator(
            gen , (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ) , (
                {
                    "example_id": tf.TensorShape([] ),
                    "input_ids": tf.TensorShape([None, None] ),
                    "attention_mask": tf.TensorShape([None, None] ),
                    "token_type_ids": tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) , )
    def get_dataset( self ):
        return self.dataset
    def __len__( self ):
        return len(self.features )
    def __getitem__( self , i ):
        return self.features[i]
    def get_labels( self ):
        return self.label_list
class HansProcessor( DataProcessor ):
    def get_train_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_train_set.txt" ) ) , "train" )
    def get_dev_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_evaluation_set.txt" ) ) , "dev" )
    def get_labels( self ):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples( self , lines , set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex" ) else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ):
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="convert examples to features" ):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="max_length" , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(f"guid: {example}" )
        logger.info(f"features: {features[i]}" )
    return features
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
}
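# A rough usage sketch (the data path and model name below are placeholders):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     dataset = HansDataset(
#         data_dir="/path/to/hans",  # directory containing heuristics_train_set.txt
#         tokenizer=tokenizer,
#         task="hans",
#         max_seq_length=128,
#     )
#     print(len(dataset), dataset.get_labels())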
| 36 | 0 |
from __future__ import annotations
class IIRFilter:
    def __init__( self , order: int ):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self , a_coeffs: list[float] , b_coeffs: list[float] ) -> None:
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                f'''Expected a_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(a_coeffs )}'''
            )
            raise ValueError(error_msg )
        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                f'''Expected b_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(b_coeffs )}'''
            )
            raise ValueError(error_msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self , sample: float ) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the histories by one sample and store the newest values at index 0.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
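# Example (a sketch; the helper name is illustrative): a first-order
# exponential-smoothing low-pass, y[n] = 0.9 * y[n-1] + 0.1 * x[n],
# expressed as IIR coefficients for the class above.
def _example_iir_filter():
    filt = IIRFilter(1 )
    filt.set_coefficients(a_coeffs=[1.0, -0.9] , b_coeffs=[0.1, 0.0] )
    samples = [0.0, 1.0, 1.0, 1.0, 1.0]
    print([filt.process(s ) for s in samples] )  # output ramps up toward 1.0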
| 37 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@require_ftfy
    def test_check_encoding_slow_fast( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
tokens_s = tokenizer_s.tokenize(text )
tokens_r = tokenizer_r.tokenize(text )
self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
text = "xa\u0303y" + " " + "x\xe3y"
tokens_s = tokenizer_s.tokenize(text )
tokens_r = tokenizer_r.tokenize(text )
self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on unicode of space type
spaces_unicodes = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
tokens_s = tokenizer_s.tokenize(unicode_seq )
tokens_r = tokenizer_r.tokenize(unicode_seq )
self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on unicode of line break type
line_break_unicodes = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
tokens_s = tokenizer_s.tokenize(unicode_seq )
tokens_r = tokenizer_r.tokenize(unicode_seq )
self.assertListEqual(tokens_s , tokens_r )
    def test_offsets_mapping_with_different_add_prefix_space_argument( self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = f''' {text}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
    def test_log_warning( self ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError ) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format." ) )
    @require_ftfy
    def test_tokenization_python_rust_equals( self ):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case( self ):
        # CLIP always lower cases letters
        pass
| 37 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_squeezebert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_squeezebert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_squeezebert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_squeezebert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self ):
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_squeezebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )
    def test_squeezebert_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )
    def test_squeezebert_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )
    def test_squeezebert_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )
    def test_squeezebert_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )
    def test_squeezebert_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_inference_classification_head( self ):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]] )
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1E-4 ) )
| 37 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def convert_command_factory(args: Namespace ):
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand( BaseDatasetsCLICommand ):
    @staticmethod
    def register_subcommand( parser: ArgumentParser ):
        train_parser = parser.add_parser(
            "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
        train_parser.add_argument(
            "--tfds_path" , type=str , required=True , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
        train_parser.add_argument(
            "--datasets_directory" , type=str , required=True , help="Path to the HuggingFace Datasets folder." )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , tfds_path: str , datasets_directory: str , *args ):
        self._logger = get_logger("datasets-cli/converting" )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''' )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
            with open(input_file , encoding="utf-8" ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger" , "get_logger" )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove ) + "\n" )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
                    out_line = "from . import " + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py" , "" )
                output_dir = os.path.join(abs_datasets_path , dir_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(f'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , "w" , encoding="utf-8" ) as f:
                f.writelines(out_lines )
            self._logger.info(f'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace(".py" , "" )]
                self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
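# Invoked through the `datasets-cli` entry point, the command registered above
# is used like this (the paths are illustrative):
#
#     datasets-cli convert --tfds_path ~/tensorflow_datasets/my_dataset.py --datasets_directory ./my_converted_datasets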
| 37 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type: str = field(default="Image" , init=False , repr=False )
def __call__( self : Optional[int] ):
return self.pa_type
    def encode_example( self , value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'." )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path" )}
        elif value.get("bytes" ) is not None or value.get("path" ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes" ), "path": value.get("path" )}
        else:
            raise ValueError(
                f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example( self , value: dict , token_per_repo_id=None ):
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'." )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split("::" )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , "rb" , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten( self ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
    def cast_storage( self , storage: Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                bytes_array = storage.field("bytes" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                path_array = storage.field("path" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self , storage: pa.StructArray ):
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , "rb" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image" ) -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image" ) -> dict:
    if hasattr(image , "filename" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array: np.ndarray ) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
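# For example (a sketch; the helper name is illustrative): an int64 array is
# not a valid Pillow dtype, so encode_np_array downcasts it within its kind
# before encoding (here to int32, which Pillow stores as a mode "I" image and
# serializes as TIFF bytes).
def _example_encode_np_array():
    arr = np.arange(64 * 64 , dtype=np.int64 ).reshape(64 , 64 )
    encoded = encode_np_array(arr )  # emits a downcasting warning
    print(encoded["path"] , len(encoded["bytes"] ) )  # None, <size of the TIFF payload>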
def objects_to_list_of_image_dicts(objs ) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
| 37 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem( AbstractArchiveFileSystem ):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self , fo: str = "" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , **kwargs ):
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode="rb" , protocol=target_protocol , compression=self.compression , client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs" , {} ),  # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split("::" )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex("." )]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol( cls , path ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip("/" )
    def _get_dirs( self ):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}
    def cat( self , path: str ):
        return self.file.open().read()
    def _open( self , path: str , mode: str = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class Bz2FileSystem( BaseCompressedFileFileSystem ):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class GzipFileSystem( BaseCompressedFileFileSystem ):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class Lz4FileSystem( BaseCompressedFileFileSystem ):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class XzFileSystem( BaseCompressedFileFileSystem ):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem( BaseCompressedFileFileSystem ):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__( self , fo: str , mode: str = "rb" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , block_size: int = DEFAULT_BLOCK_SIZE , **kwargs , ):
        super().__init__(
            fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
a__ : Any = self.file.__enter__
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : str ):
a__ : List[Any] = file_
def __enter__( self : str ):
self._file.__enter__()
return self
def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ):
self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ )
def __iter__( self : List[str] ):
return iter(self._file )
def _UpperCamelCase( self : Any ):
return next(self._file )
def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ):
return getattr(self._file , lowerCamelCase__ )
def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ):
return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) )
a__ : Any = fixed_enter
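# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module; the file name
# "data.txt.gz" is a placeholder created here just for illustration):
#
#   import gzip
#
#   with gzip.open("data.txt.gz", "wt") as f:
#       f.write("hello world")
#
#   fs = GzipFileSystem(fo="data.txt.gz")
#   print(fs.info("data.txt"))          # metadata for the single inner file
#   with fs.open("data.txt") as f:      # transparently decompressed stream
#       print(f.read())                 # b"hello world"
# ---------------------------------------------------------------------------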
| 37 | 1 |
def depth_first_search(grid, row, col, visit) -> int:
    """Count the simple paths from the top-left to the bottom-right cell of a
    0/1 grid, moving up/down/left/right; cells equal to 1 are walls."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
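    # Minimal usage sketch (the 3x3 maze below is made up for illustration):
    # the single blocked centre cell leaves exactly two simple paths between
    # opposite corners, one going clockwise and one counterclockwise.
    maze = [
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    print(depth_first_search(maze, 0, 0, set()))  # 2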
| 37 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Convert a PyTorch BertModel state dict into a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
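# Hedged usage sketch (the script name and paths below are placeholders):
#
#   python this_script.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt
#
# which writes <tf_cache_dir>/bert_base_uncased.ckpt in TF 1.x format.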
| 37 | 1 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10_000) -> int:
    """Sum every amicable number below ``n`` (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
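    # Quick sanity check: 220 and 284 form the classic amicable pair, so both
    # numbers are counted by solution().
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220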
| 37 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
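# Worked example of the patch-count bookkeeping in ASTModelTester.__init__,
# using its defaults (num_mel_bins=16, max_length=24, patch_size=2, both
# strides=2):
#   frequency_out_dimension = (16 - 2) // 2 + 1 = 8
#   time_out_dimension      = (24 - 2) // 2 + 1 = 12
#   seq_length              = 8 * 12 + 2 = 98   (+2 for [CLS] and distillation tokens)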
| 37 | 1 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-is-required hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
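# Minimal usage sketch of the helpers above:
#
#   require_version("python>=3.7")      # validates the running interpreter
#   require_version("packaging>=20.0")  # raises ImportError if the installed version is too old
#   require_version_core("tqdm>=4.27")  # same check, with the core install hint appended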
| 37 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
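# Note on the `fairseq_offset` arithmetic used in the assertions above: XGLM
# reserves the first vocabulary ids for special tokens, so a raw SentencePiece
# id `i` maps to `i + tokenizer.fairseq_offset` in the model vocabulary. The
# tests therefore compare against `value + tokenizer.fairseq_offset` rather
# than hard-coding the shifted ids.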
| 37 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3_000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
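# Worked example of the framing arithmetic these tests rely on, using the
# tester defaults above and WhisperFeatureExtractor's derived properties:
#   n_samples     = chunk_length * sampling_rate = 8 * 4_000  = 32_000
#   nb_max_frames = n_samples // hop_length      = 32_000 // 160 = 200
# i.e. every padded "max_length" feature has 200 frames of feature_size bins.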
| 37 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32_001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32_001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30_523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
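# Hedged usage sketch (the script name and output path are placeholders):
#
#   python this_conversion_script.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl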
| 37 | 1 |
| 37 |
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) via Pascal's rule, using O(r) space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]

print(binomial_coefficient(n=10, r=5))
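# Sanity check: C(10, 5) = 10! / (5! * 5!) = 252, so the print above outputs 252.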
| 37 | 1 |
from __future__ import annotations
import numpy as np
def relu(vector):
    """Apply the rectified linear unit element-wise: relu(x) = max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 37 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_6384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
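# Example of the padding rule implemented in _pad above: for an input padded
# from length 3 to 5 with padding_side="right", a global attention mask
# [0, 0, 1] becomes [0, 0, 1, -1, -1]; the -1 entries mark padded positions,
# whereas 0 means local attention and 1 means global attention.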
| 37 | 1 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
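# Example of the renaming/reshaping rules above: a PyTorch ``nn.Linear`` weight
# of shape (out_features, in_features) stored under ("dense", "weight") becomes
# a Flax kernel of shape (in_features, out_features) under ("dense", "kernel"),
# and an ``nn.Conv2d`` weight of shape (out, in, kh, kw) is transposed to the
# HWIO layout (kh, kw, in, out).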
| 37 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase : List[str] = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class A__ ( A__ ):
"""simple docstring"""
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = ['input_ids', 'attention_mask']
_lowercase = RobertaTokenizer
def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ):
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , )
a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) )
a__ : int = add_prefix_space
a__ : Tuple = pre_tok_class(**lowerCamelCase__ )
a__ : str = add_prefix_space
a__ : Tuple = "post_processor"
a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
if tokenizer_component_instance:
a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
a__ : Tuple = tuple(state["sep"] )
if "cls" in state:
a__ : str = tuple(state["cls"] )
a__ : str = False
if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
a__ : str = add_prefix_space
a__ : Any = True
if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets:
a__ : int = trim_offsets
a__ : Dict = True
if changes_to_apply:
a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) )
a__ : str = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
@property
def _UpperCamelCase( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ):
a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value
a__ : List[str] = value
def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ):
a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ):
a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ):
a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ):
a__ : Tuple = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
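# Minimal sketch of the special-token layout the first method above produces for
# RoBERTa-style inputs; the second method returns all zeros, since RoBERTa does
# not use token type ids (token ids below are made up for illustration):
def with_special_tokens(bos, eos, ids_a, ids_b=None):
    out = [bos] + ids_a + [eos]            # <s> A </s>
    if ids_b is None:
        return out
    return out + [eos] + ids_b + [eos]     # <s> A </s></s> B </s>
print(with_special_tokens(0, 2, [10, 11]))            # [0, 10, 11, 2]
print(with_special_tokens(0, 2, [10, 11], [20, 21]))  # [0, 10, 11, 2, 2, 20, 21, 2]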
| 37 | 1 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int]=3 , lowerCamelCase__ : Dict=32 , lowerCamelCase__ : int=3 , lowerCamelCase__ : int=10 , lowerCamelCase__ : Optional[Any]=[10, 20, 30, 40] , lowerCamelCase__ : Any=[1, 1, 2, 1] , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Tuple="relu" , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : List[str]=None , ):
a__ : List[str] = parent
a__ : Tuple = batch_size
a__ : int = image_size
a__ : List[str] = num_channels
a__ : List[Any] = embeddings_size
a__ : Optional[Any] = hidden_sizes
a__ : Union[str, Any] = depths
a__ : Tuple = is_training
a__ : Any = use_labels
a__ : List[Any] = hidden_act
a__ : Dict = num_labels
a__ : Union[str, Any] = scope
a__ : Tuple = len(lowerCamelCase__ )
def _UpperCamelCase( self : Any ):
a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : Optional[Any] = None
if self.use_labels:
a__ : int = ids_tensor([self.batch_size] , self.num_labels )
a__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase( self : Dict ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] ):
a__ : Optional[int] = TFRegNetModel(config=lowerCamelCase__ )
a__ : List[str] = model(lowerCamelCase__ , training=lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _UpperCamelCase( self : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any ):
a__ : List[Any] = self.num_labels
a__ : List[Any] = TFRegNetForImageClassification(lowerCamelCase__ )
a__ : Dict = model(lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase( self : Optional[Any] ):
a__ : Union[str, Any] = self.prepare_config_and_inputs()
a__, a__, a__ : Union[str, Any] = config_and_inputs
a__ : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_lowercase = (
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
def _UpperCamelCase( self : List[str] ):
a__ : int = TFRegNetModelTester(self )
a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] ):
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _UpperCamelCase( self : Union[str, Any] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def _UpperCamelCase( self : List[Any] ):
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _UpperCamelCase( self : List[str] ):
pass
def _UpperCamelCase( self : Tuple ):
a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[str] = model_class(lowerCamelCase__ )
a__ : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[Any] = [*signature.parameters.keys()]
a__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
a__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _UpperCamelCase( self : List[str] ):
def check_hidden_states_output(lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] ):
a__ : List[str] = model_class(lowerCamelCase__ )
a__ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) , training=lowerCamelCase__ )
a__ : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a__ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : str = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
a__ : Tuple = layer_type
a__ : List[Any] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
                # check that output_hidden_states also works when set via config
del inputs_dict["output_hidden_states"]
a__ : Tuple = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
a__, a__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict={} ):
a__ : Optional[int] = model(lowerCamelCase__ , return_dict=lowerCamelCase__ , **lowerCamelCase__ )
a__ : List[Any] = model(lowerCamelCase__ , return_dict=lowerCamelCase__ , **lowerCamelCase__ ).to_tuple()
def recursive_check(lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] ):
if isinstance(lowerCamelCase__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCamelCase__ , lowerCamelCase__ ):
recursive_check(lowerCamelCase__ , lowerCamelCase__ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowerCamelCase__ , lowerCamelCase__ ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
f''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(lowerCamelCase__ , lowerCamelCase__ )
for model_class in self.all_model_classes:
a__ : Any = model_class(lowerCamelCase__ )
a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
check_equivalence(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : Optional[Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
check_equivalence(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
check_equivalence(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {"output_hidden_states": True} )
a__ : Dict = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : Optional[int] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
check_equivalence(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {"output_hidden_states": True} )
def _UpperCamelCase( self : int ):
a__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def _UpperCamelCase( self : str ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Optional[Any] = TFRegNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCamelCase_ ( ) -> int:
a__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase( self : Union[str, Any] ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _UpperCamelCase( self : Any ):
a__ : Optional[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
a__ : Tuple = self.default_image_processor
a__ : Union[str, Any] = prepare_img()
a__ : Union[str, Any] = image_processor(images=lowerCamelCase__ , return_tensors="tf" )
# forward pass
a__ : Optional[int] = model(**lowerCamelCase__ , training=lowerCamelCase__ )
# verify the logits
a__ : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
a__ : int = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 )
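# Quick sanity sketch of the spatial-shape assertion used above: a RegNet-style
# backbone halves the resolution in the stem and once per stage, so four stages
# leave a feature map of H // 32 x W // 32 (numbers here are illustrative):
image_size, num_stages = 224, 4
spatial = image_size // 2          # stem, stride 2
for _ in range(num_stages):
    spatial //= 2                  # each stage downsamples by 2
print(spatial, image_size // 32)   # 7 7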
| 37 |
from statistics import mean, stdev
def UpperCamelCase_ ( __a , __a = 3 ) -> list:
a__ : List[str] = min(__a )
a__ : str = max(__a )
# normalize data
return [round((x - x_min) / (x_max - x_min) , __a ) for x in data]
def UpperCamelCase_ ( __a , __a = 3 ) -> list:
a__ : str = mean(__a )
a__ : List[str] = stdev(__a )
# standardize data
return [round((x - mu) / (sigma) , __a ) for x in data]
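# Self-contained usage sketch of the two rescalings above, restated inline
# because the snippet's functions share a placeholder name:
from statistics import mean, stdev
data = [2.0, 4.0, 6.0, 8.0]
x_min, x_max = min(data), max(data)
print([round((x - x_min) / (x_max - x_min), 3) for x in data])  # [0.0, 0.333, 0.667, 1.0]
mu, sigma = mean(data), stdev(data)
print([round((x - mu) / sigma, 3) for x in data])  # symmetric around 0, unit sample std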
| 37 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
"""microsoft/unispeech-sat-base-100h-libri-ft""": (
"""https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'unispeech-sat'
def __init__( self : Optional[Any] , lowerCamelCase__ : str=32 , lowerCamelCase__ : List[str]=768 , lowerCamelCase__ : Dict=12 , lowerCamelCase__ : int=12 , lowerCamelCase__ : Union[str, Any]=3_072 , lowerCamelCase__ : str="gelu" , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Any=0.0 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[Any]=0.1 , lowerCamelCase__ : Dict=0.02 , lowerCamelCase__ : Dict=1E-5 , lowerCamelCase__ : str="group" , lowerCamelCase__ : Union[str, Any]="gelu" , lowerCamelCase__ : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , lowerCamelCase__ : int=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase__ : int=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase__ : Any=False , lowerCamelCase__ : Tuple=128 , lowerCamelCase__ : List[str]=16 , lowerCamelCase__ : str=False , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Optional[Any]=0.05 , lowerCamelCase__ : Dict=10 , lowerCamelCase__ : int=2 , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : Dict=10 , lowerCamelCase__ : Union[str, Any]=0 , lowerCamelCase__ : Union[str, Any]=320 , lowerCamelCase__ : int=2 , lowerCamelCase__ : List[str]=0.1 , lowerCamelCase__ : str=100 , lowerCamelCase__ : int=256 , lowerCamelCase__ : str=256 , lowerCamelCase__ : str=0.1 , lowerCamelCase__ : str="mean" , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : List[str]=256 , lowerCamelCase__ : Any=(512, 512, 512, 512, 1_500) , lowerCamelCase__ : Optional[int]=(5, 3, 3, 1, 1) , lowerCamelCase__ : int=(1, 2, 3, 1, 1) , lowerCamelCase__ : Optional[int]=512 , lowerCamelCase__ : Union[str, Any]=0 , lowerCamelCase__ : Dict=1 , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=504 , **lowerCamelCase__ : str , ):
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
a__ : Optional[int] = hidden_size
a__ : Optional[Any] = feat_extract_norm
a__ : Any = feat_extract_activation
a__ : Union[str, Any] = list(lowerCamelCase__ )
a__ : str = list(lowerCamelCase__ )
a__ : int = list(lowerCamelCase__ )
a__ : str = conv_bias
a__ : List[Any] = num_conv_pos_embeddings
a__ : Any = num_conv_pos_embedding_groups
a__ : Tuple = len(self.conv_dim )
a__ : Tuple = num_hidden_layers
a__ : Optional[Any] = intermediate_size
a__ : Tuple = hidden_act
a__ : Optional[Any] = num_attention_heads
a__ : int = hidden_dropout
a__ : Optional[int] = attention_dropout
a__ : Dict = activation_dropout
a__ : Optional[Any] = feat_proj_dropout
a__ : str = final_dropout
a__ : List[Any] = layerdrop
a__ : Any = layer_norm_eps
a__ : str = initializer_range
a__ : Dict = vocab_size
a__ : Tuple = num_clusters
a__ : Dict = do_stable_layer_norm
a__ : Dict = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a__ : List[str] = apply_spec_augment
a__ : Optional[Any] = mask_time_prob
a__ : str = mask_time_length
a__ : Optional[Any] = mask_time_min_masks
a__ : List[str] = mask_feature_prob
a__ : Optional[Any] = mask_feature_length
a__ : str = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
a__ : List[str] = num_codevectors_per_group
a__ : int = num_codevector_groups
a__ : str = contrastive_logits_temperature
a__ : List[str] = feat_quantizer_dropout
a__ : int = num_negatives
a__ : Tuple = codevector_dim
a__ : Tuple = proj_codevector_dim
a__ : List[str] = diversity_loss_weight
# ctc loss
a__ : Dict = ctc_loss_reduction
a__ : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a__ : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a__ : Union[str, Any] = list(lowerCamelCase__ )
a__ : Any = list(lowerCamelCase__ )
a__ : List[str] = list(lowerCamelCase__ )
a__ : Any = xvector_output_dim
@property
def _UpperCamelCase( self : List[str] ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
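# Sketch of the property above: the feature extractor's overall stride is the
# product of the per-layer conv strides, so the default strides in the signature
# above compress 16 kHz audio by a factor of 320, i.e. one frame per 20 ms:
import functools
import operator
conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio, 16_000 // ratio)  # 320 50 -> 50 feature frames per second of audio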
| 37 |
def UpperCamelCase_ ( __a = 50 ) -> int:
a__ : Tuple = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 37 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class A__ :
"""simple docstring"""
def __init__( self : str , lowerCamelCase__ : str , lowerCamelCase__ : str=2 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : Tuple=10 , lowerCamelCase__ : int=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : Union[str, Any]=32 * 8 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Optional[int]=64 , ):
a__ : List[str] = parent
a__ : List[Any] = batch_size
a__ : Any = is_training
a__ : Optional[Any] = use_auxiliary_loss
a__ : Dict = num_queries
a__ : Any = num_channels
a__ : List[Any] = min_size
a__ : Dict = max_size
a__ : Dict = num_labels
a__ : str = hidden_dim
a__ : int = hidden_dim
def _UpperCamelCase( self : Union[str, Any] ):
a__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
a__ : List[str] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
a__ : List[str] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
a__ : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
a__ : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _UpperCamelCase( self : Union[str, Any] ):
a__ : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
a__ : str = self.num_queries
a__ : Tuple = self.num_labels
a__ : Optional[int] = [1, 1, 1, 1]
a__ : Dict = self.num_channels
a__ : int = 64
a__ : Optional[Any] = 128
a__ : Optional[int] = self.hidden_dim
a__ : Dict = self.hidden_dim
a__ : Optional[Any] = self.hidden_dim
return config
def _UpperCamelCase( self : Optional[Any] ):
a__, a__, a__, a__, a__ : Union[str, Any] = self.prepare_config_and_inputs()
a__ : Dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ):
a__ : Any = output.encoder_hidden_states
a__ : Optional[Any] = output.pixel_decoder_hidden_states
a__ : str = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(lowerCamelCase__ ) , config.decoder_layers )
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=False ):
with torch.no_grad():
a__ : List[str] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
a__ : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict ):
a__ : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Optional[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
a__ : Tuple = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
a__ : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
a__ : Optional[Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowercase = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
def _UpperCamelCase( self : List[Any] ):
a__ : Dict = MaskaFormerModelTester(self )
a__ : List[str] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
self.config_tester.run_common_tests()
def _UpperCamelCase( self : int ):
a__, a__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def _UpperCamelCase( self : List[str] ):
a__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def _UpperCamelCase( self : str ):
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def _UpperCamelCase( self : Optional[Any] ):
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def _UpperCamelCase( self : Dict ):
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def _UpperCamelCase( self : Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _UpperCamelCase( self : Tuple ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCamelCase( self : int ):
pass
def _UpperCamelCase( self : Optional[int] ):
a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Dict = model_class(lowerCamelCase__ )
a__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : List[str] = [*signature.parameters.keys()]
a__ : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : Any ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
a__ : Union[str, Any] = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _UpperCamelCase( self : int ):
a__ : int = (self.model_tester.min_size,) * 2
a__ : Tuple = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
a__ : int = self.model_tester.get_config()
a__ : Any = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
a__ : Optional[int] = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def _UpperCamelCase( self : Optional[Any] ):
a__, a__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
a__, a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Optional[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
a__ : Any = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def _UpperCamelCase( self : Optional[Any] ):
if not self.model_tester.is_training:
return
a__ : Union[str, Any] = self.all_model_classes[1]
a__, a__, a__, a__, a__ : List[str] = self.model_tester.prepare_config_and_inputs()
a__ : Tuple = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
a__ : int = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : Optional[int] ):
a__ : str = self.all_model_classes[1]
a__, a__, a__, a__, a__ : int = self.model_tester.prepare_config_and_inputs()
a__ : Optional[Any] = True
a__ : str = True
a__ : Any = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
a__ : Union[str, Any] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
a__ : Dict = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a__ : Optional[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
a__ : List[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a__ : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCamelCase : Any = 1E-4
def UpperCamelCase_ ( ) -> str:
a__ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase( self : str ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _UpperCamelCase( self : str ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _UpperCamelCase( self : Dict ):
a__ : Dict = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
a__ : List[Any] = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Optional[Any] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
a__ : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
a__ : str = model(**lowerCamelCase__ )
a__ : str = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
a__ : List[str] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
a__ : Tuple = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def _UpperCamelCase( self : List[Any] ):
a__ : str = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
a__ : List[Any] = self.default_image_processor
a__ : str = prepare_img()
a__ : Union[str, Any] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
a__ : str = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
a__ : Union[str, Any] = model(**lowerCamelCase__ )
# masks_queries_logits
a__ : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
a__ : Optional[int] = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
a__ : int = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
a__ : Any = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
a__ : List[str] = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def _UpperCamelCase( self : str ):
a__ : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
a__ : List[Any] = self.default_image_processor
a__ : Union[str, Any] = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
a__ : Any = inputs["pixel_values"].to(lowerCamelCase__ )
a__ : List[Any] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
a__ : List[Any] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
a__ : List[Any] = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
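# Shape sketch for the assertions above (illustrative sizes): Mask2Former emits
# one mask-logit map per query at 1/4 of the padded input resolution, plus one
# class logit per query with an extra "no object" entry:
batch, queries, num_labels = 1, 100, 80
height = width = 384
print((batch, queries, height // 4, width // 4))  # masks_queries_logits: (1, 100, 96, 96)
print((batch, queries, num_labels + 1))           # class_queries_logits: (1, 100, 81)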
| 37 |
class A__ :
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] ):
a__ : str = name
a__ : Optional[int] = value
a__ : Dict = weight
def __repr__( self : Union[str, Any] ):
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def _UpperCamelCase( self : Dict ):
return self.value
def _UpperCamelCase( self : Optional[Any] ):
return self.name
def _UpperCamelCase( self : Optional[Any] ):
return self.weight
def _UpperCamelCase( self : Optional[int] ):
return self.value / self.weight
def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]:
a__ : Optional[Any] = []
for i in range(len(__a ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def UpperCamelCase_ ( __a , __a , __a ) -> Union[str, Any]:
a__ : List[str] = sorted(__a , key=__a , reverse=__a )
a__ : List[Any] = []
a__, a__ : Union[str, Any] = 0.0, 0.0
for i in range(len(__a ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def UpperCamelCase_ ( ) -> Union[str, Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
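# Minimal usage sketch of the greedy routine above, restated with readable names
# (items and budget below are made up): sort by value density, take what fits.
items = [("apple", 50, 10), ("cake", 200, 40), ("tea", 10, 1)]  # (name, value, weight)
max_cost = 51
chosen, total_value, total_weight = [], 0.0, 0.0
for name, value, weight in sorted(items, key=lambda it: it[1] / it[2], reverse=True):
    if total_weight + weight <= max_cost:
        chosen.append(name)
        total_weight += weight
        total_value += value
print(chosen, total_value)  # ['tea', 'apple', 'cake'] 260.0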
| 37 | 1 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """
UpperCamelCase : List[Any] = """=======
>>>>>>>
"""
UpperCamelCase : Optional[Any] = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
UpperCamelCase : Any = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def UpperCamelCase_ ( __a ) -> Optional[Any]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A__ ( A__ ):
"""simple docstring"""
@staticmethod
def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ):
a__ : List[str] = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ):
a__ : str = get_logger("datasets-cli/converting" )
a__ : Optional[Any] = tfds_path
a__ : Optional[int] = datasets_directory
def _UpperCamelCase( self : int ):
if os.path.isdir(self._tfds_path ):
a__ : List[str] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
a__ : Any = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
a__ : Dict = os.path.abspath(self._datasets_directory )
self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
a__ : Tuple = []
a__ : str = []
a__ : List[Any] = {}
if os.path.isdir(self._tfds_path ):
a__ : List[str] = os.listdir(lowerCamelCase__ )
else:
a__ : Union[str, Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'''Looking at file {f_name}''' )
a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(lowerCamelCase__ , encoding="utf-8" ) as f:
a__ : List[Any] = f.readlines()
a__ : Union[str, Any] = []
a__ : Union[str, Any] = False
a__ : Union[str, Any] = False
a__ : Dict = []
for line in lines:
a__ : Optional[Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
a__ : List[Any] = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
a__ : List[str] = ""
continue
elif "from absl import logging" in out_line:
a__ : Dict = "from datasets import logging\n"
elif "getLogger" in out_line:
a__ : List[Any] = out_line.replace("getLogger" , "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
a__ : List[str] = True
a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" )
out_lines.append(lowerCamelCase__ )
out_lines.append(lowerCamelCase__ )
continue
else:
for pattern, replacement in TO_CONVERT:
a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
a__ : Optional[Any] = "from . import " + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
a__ : Optional[int] = True
out_lines.append(lowerCamelCase__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
a__ : Dict = f_name.replace(".py" , "" )
a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
self._logger.info(f'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCamelCase__ )
if needs_manual_update:
with_manual_update.append(lowerCamelCase__ )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.writelines(lowerCamelCase__ )
self._logger.info(f'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
a__ : Any = os.path.basename(lowerCamelCase__ )
a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )]
self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(lowerCamelCase__ , lowerCamelCase__ )
except KeyError:
self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
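# Small demonstration of the substitution table above on one line of TFDS code
# (a subset of the patterns, applied in order exactly as the command does):
import re
rules = [
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.core", r"datasets"),
    (r"tfds\.", r"datasets."),
]
line = "tfds.core.GeneratorBasedBuilder with tfds.features.Text()"
for pattern, replacement in rules:
    line = re.sub(pattern, replacement, line)
print(line)  # datasets.GeneratorBasedBuilder with datasets.Value('string')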
| 37 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class A__ ( A__ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase__ : Union[str, "sqlalchemy.sql.Selectable"] , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , **lowerCamelCase__ : Optional[int] , ):
super().__init__(features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , **lowerCamelCase__ )
a__ : str = Sql(
cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , sql=lowerCamelCase__ , con=lowerCamelCase__ , **lowerCamelCase__ , )
def _UpperCamelCase( self : Tuple ):
a__ : Optional[Any] = None
a__ : Dict = None
a__ : Union[str, Any] = None
a__ : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , )
# Build dataset for splits
a__ : List[str] = self.builder.as_dataset(
split="train" , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory )
return dataset
class A__ :
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase__ : Dataset , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Optional[Any] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
a__ : Any = dataset
a__ : str = name
a__ : Tuple = con
a__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
a__ : Any = num_proc
a__ : Tuple = to_sql_kwargs
def _UpperCamelCase( self : List[Any] ):
a__ : Any = self.to_sql_kwargs.pop("sql" , lowerCamelCase__ )
a__ : int = self.to_sql_kwargs.pop("con" , lowerCamelCase__ )
a__ : int = self.to_sql_kwargs.pop("index" , lowerCamelCase__ )
a__ : int = self._write(index=lowerCamelCase__ , **self.to_sql_kwargs )
return written
def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ):
a__, a__, a__ : Union[str, Any] = args
a__ : Any = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
a__ : Tuple = query_table(
table=self.dataset.data , key=slice(lowerCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , )
a__ : str = batch.to_pandas()
a__ : List[Any] = df.to_sql(self.name , self.con , index=lowerCamelCase__ , **lowerCamelCase__ )
return num_rows or len(lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ):
a__ : str = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
a__, a__ : List[str] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , lowerCamelCase__ , lowerCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
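# Sketch of the offset slicing the writer above uses to chunk the table into
# batches (illustrative sizes): each batch covers [offset, offset + batch_size).
num_rows, batch_size = 10_500, 1_000
offsets = list(range(0, num_rows, batch_size))
print(len(offsets))                                          # 11 batches
print(offsets[-1], min(offsets[-1] + batch_size, num_rows))  # last slice: 10000 10500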
| 37 | 1 |
import gc
import threading
import time
import psutil
import torch
class A__ :
"""simple docstring"""
def __init__( self : int ):
a__ : Optional[int] = psutil.Process()
a__ : Union[str, Any] = False
def _UpperCamelCase( self : Any ):
a__ : str = -1
while True:
a__ : Dict = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # busy-wait on purpose: sleeping here would let short-lived memory peaks slip past
if not self.peak_monitoring:
break
def _UpperCamelCase( self : Dict ):
a__ : int = True
a__ : Tuple = threading.Thread(target=self.peak_monitor )
a__ : Dict = True
self.thread.start()
def _UpperCamelCase( self : Any ):
a__ : Dict = False
self.thread.join()
return self.cpu_memory_peak
UpperCamelCase : List[str] = PeakCPUMemory()
def UpperCamelCase_ ( ) -> int:
# Time
a__ : List[Any] = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
a__ : Union[str, Any] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
a__ : Dict = torch.cuda.memory_allocated(__a )
torch.cuda.reset_peak_memory_stats()
return measures
def UpperCamelCase_ ( __a ) -> Any:
# Time
a__ : Union[str, Any] = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
a__ : Any = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
a__ : Optional[int] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
a__ : Optional[Any] = (torch.cuda.memory_allocated(__a ) - start_measures[str(__a )]) / 2**20
a__ : int = (torch.cuda.max_memory_allocated(__a ) - start_measures[str(__a )]) / 2**20
return measures
def UpperCamelCase_ ( __a , __a ) -> Optional[Any]:
print(f'''{description}:''' )
print(f'''- Time: {measures['time']:.2f}s''' )
for i in range(torch.cuda.device_count() ):
print(f'''- GPU {i} allocated: {measures[str(__a )]:.2f}MiB''' )
a__ : Dict = measures[f'''{i}-peak''']
print(f'''- GPU {i} peak: {peak:.2f}MiB''' )
print(f'''- CPU RAM allocated: {measures['cpu']:.2f}MiB''' )
print(f'''- CPU RAM peak: {measures['cpu-peak']:.2f}MiB''' )
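# Minimal runnable sketch of the peak-RSS polling pattern above, with readable
# names (assumes psutil is installed; the deliberately sleep-free loop mirrors
# the class above):
import threading
import psutil
class CPUPeak:
    def __init__(self):
        self.process = psutil.Process()
        self.peak = -1
        self.running = False
    def _poll(self):
        while self.running:  # no sleep, so short allocation spikes are caught
            self.peak = max(self.process.memory_info().rss, self.peak)
    def start(self):
        self.running = True
        self.thread = threading.Thread(target=self._poll, daemon=True)
        self.thread.start()
    def stop(self):
        self.running = False
        self.thread.join()
        return self.peak
tracker = CPUPeak()
tracker.start()
_ = [bytearray(1_024) for _ in range(100_000)]  # briefly allocate ~100 MB
print(tracker.stop() / 2**20, "MiB peak RSS")   # bytes -> MiB, as reported above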
| 37 |
import math
from datetime import datetime, timedelta
def UpperCamelCase_ ( __a ) -> datetime:
a__ : Union[str, Any] = year % 19
a__ : List[str] = year % 4
a__ : str = year % 7
a__ : Any = math.floor(year / 100 )
a__ : List[str] = math.floor((13 + 8 * leap_day_inhibits) / 25 )
a__ : Optional[int] = leap_day_inhibits / 4
a__ : Union[str, Any] = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
a__ : Dict = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
a__ : Any = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
a__ : List[Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__a , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__a , 4 , 18 )
else:
return datetime(__a , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
UpperCamelCase : Tuple = """will be""" if year > datetime.now().year else """was"""
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 37 | 1 |
def UpperCamelCase_ ( __a = 50 ) -> int:
a__ : Tuple = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 37 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def UpperCamelCase_ ( __a ) -> Union[str, Any]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase__ : nn.Module , lowerCamelCase__ : int ):
super().__init__()
a__ : int = module
a__ : Any = nn.Sequential(
nn.Linear(module.in_features , lowerCamelCase__ , bias=lowerCamelCase__ ) , nn.Linear(lowerCamelCase__ , module.out_features , bias=lowerCamelCase__ ) , )
a__ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase__ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ):
return self.module(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) + self.adapter(lowerCamelCase__ )
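    # Minimal runnable sketch of the bottleneck-adapter pattern above, with
    # readable names (torch/nn come from the imports in this block; sizes and
    # the zero-init choice mirror the class above, so the adapter starts as a
    # no-op residual path):
    class LinearWithAdapter(nn.Module):
        def __init__(self, base: nn.Linear, rank: int):
            super().__init__()
            self.base = base
            self.adapter = nn.Sequential(
                nn.Linear(base.in_features, rank, bias=False),
                nn.Linear(rank, base.out_features, bias=False),
            )
            nn.init.zeros_(self.adapter[1].weight)  # output starts identical to base
        def forward(self, x):
            return self.base(x) + self.adapter(x)  # residual low-rank correction
    # Usage: LinearWithAdapter(nn.Linear(16, 32), rank=4)(torch.randn(2, 16)) -> (2, 32)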
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
_lowercase = 'bigscience/bloom-1b7'
# Constant values
_lowercase = 2.1_09_65_95_52_69_25_74
_lowercase = 'Hello my name is'
_lowercase = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
_lowercase = 1_0
def _UpperCamelCase( self : Dict ):
# Models and tokenizer
a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Union[str, Any] ):
super().setUp()
# Models and tokenizer
a__ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
def _UpperCamelCase( self : List[Any] ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : List[Any] ):
a__ : str = self.model_abit.config
self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) )
a__ : Optional[Any] = config.to_dict()
a__ : int = config.to_diff_dict()
a__ : List[str] = config.to_json_string()
def _UpperCamelCase( self : int ):
from bitsandbytes.nn import Paramsabit
a__ : List[Any] = self.model_fpaa.get_memory_footprint()
a__ : str = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
a__ : Optional[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
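    # Back-of-envelope for the ratio asserted above: fp16 stores 2 bytes per
    # weight while 4-bit storage uses ~0.5 byte, a 4x gap on the quantized
    # linear layers; modules kept in higher precision (embeddings, lm_head,
    # norms) pull the whole-model ratio down toward the ~2.11 constant above.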
def _UpperCamelCase( self : Tuple ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCamelCase__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _UpperCamelCase( self : str ):
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[Any] = BitsAndBytesConfig()
a__ : Optional[int] = True
a__ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" )
a__ : str = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : int = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : Dict ):
with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] ):
a__ : int = BitsAndBytesConfig()
with self.assertRaises(lowerCamelCase__ ):
a__ : Dict = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , )
def _UpperCamelCase( self : int ):
with self.assertRaises(lowerCamelCase__ ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
a__ : int = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Any = self.model_fpaa.to(torch.floataa )
a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.to("cpu" )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.half()
# Check this does not throw an error
a__ : Dict = self.model_fpaa.float()
def _UpperCamelCase( self : Dict ):
a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _UpperCamelCase( cls : str ):
a__ : Dict = "t5-small"
a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
a__ : int = AutoTokenizer.from_pretrained(cls.model_name )
a__ : str = "Translate in German: Hello, my dog is cute"
def _UpperCamelCase( self : Optional[int] ):
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Optional[int] ):
from transformers import TaForConditionalGeneration
a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules
a__ : Optional[Any] = None
# test with `t5-small`
a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Any = model.generate(**lowerCamelCase__ )
a__ : Union[str, Any] = modules
def _UpperCamelCase( self : List[Any] ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : int = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Optional[int] = model.generate(**lowerCamelCase__ )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : List[str] ):
super().setUp()
# model_name
a__ : Union[str, Any] = "bigscience/bloom-560m"
a__ : Union[str, Any] = "t5-small"
# Different types of model
a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# Sequence classification model
a__ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# CausalLM model
a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# Seq2seq model
a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
def _UpperCamelCase( self : List[Any] ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Union[str, Any] ):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
super().setUp()
def _UpperCamelCase( self : int ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Tuple ):
a__ : int = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
a__ : Tuple = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Tuple ):
super().setUp()
def _UpperCamelCase( self : List[Any] ):
a__ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
a__ : Any = "facebook/opt-350m"
super().setUp()
def _UpperCamelCase( self : int ):
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
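# Recap of the 4-bit training recipe exercised above (a minimal sketch, not a
# definitive implementation): freeze the quantized base model, upcast 1-dim
# parameters such as layernorms to fp32 for stability, then train only the adapters.
# The helper below is hypothetical and merely restates those steps.
def _freeze_for_kbit_training_sketch(model):
    for param in model.parameters():
        param.requires_grad = False  # adapters added afterwards carry the gradients
        if param.ndim == 1:
            param.data = param.data.to(torch.float32)
    return model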
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'gpt2-xl'
_lowercase = 3.31_91_85_48_54_15_21_87
| 37 | 1 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    """Approximate the area between ``fnc`` and the x axis on [x_start, x_end]
    using ``steps`` trapezoids."""
    xa = x_start
    fxa = fnc(xa)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa2 = (x_end - x_start) / steps + xa
        fxa2 = fnc(xa2)
        area += abs(fxa2 + fxa) * (xa2 - xa) / 2
        # Increment step
        xa = xa2
        fxa = fxa2
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
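# Sanity check (a minimal sketch, assuming trapezoidal_area above): for f(x) = x^3 + x^2
# on [-5, 5], the exact unsigned area against the x axis works out to 938/3 ≈ 312.67,
# since the curve is negative on (-5, -1) and positive elsewhere.
def _check_trapezoidal_area() -> None:
    approx = trapezoidal_area(lambda x: x**3 + x**2, -5, 5, 100_000)
    assert abs(approx - 938 / 3) < 1e-2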
| 37 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    """Builds a BeitConfig and dummy inputs for the BeiT model tests below."""
def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ):
a__ : Dict = parent
a__ : Dict = 100
a__ : Optional[int] = batch_size
a__ : Union[str, Any] = image_size
a__ : Any = patch_size
a__ : Optional[Any] = num_channels
a__ : int = is_training
a__ : List[str] = use_labels
a__ : Optional[Any] = hidden_size
a__ : List[Any] = num_hidden_layers
a__ : str = num_attention_heads
a__ : str = intermediate_size
a__ : int = hidden_act
a__ : List[Any] = hidden_dropout_prob
a__ : Dict = attention_probs_dropout_prob
a__ : Union[str, Any] = type_sequence_label_size
a__ : Optional[Any] = initializer_range
a__ : List[str] = scope
a__ : int = out_indices
a__ : List[str] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ : Optional[int] = (image_size // patch_size) ** 2
a__ : Union[str, Any] = num_patches + 1
def _UpperCamelCase( self : int ):
a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : Optional[Any] = None
a__ : Tuple = None
if self.use_labels:
a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a__ : Optional[int] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCamelCase( self : Tuple ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ):
a__ : str = BeitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ):
a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ):
a__ : List[str] = self.type_sequence_label_size
a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a__ : Optional[Any] = 1
a__ : List[str] = BeitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ):
a__ : int = self.num_labels
a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : Tuple = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _UpperCamelCase( self : Optional[int] ):
a__ : Any = self.prepare_config_and_inputs()
a__, a__, a__, a__ : Union[str, Any] = config_and_inputs
a__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common modelling and pipeline tests for the BeiT model family."""
_lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
_lowercase = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def _UpperCamelCase( self : Any ):
a__ : int = BeitModelTester(self )
a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def _UpperCamelCase( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def _UpperCamelCase( self : str ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _UpperCamelCase( self : Dict ):
pass
def _UpperCamelCase( self : Optional[Any] ):
a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[str] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def _UpperCamelCase( self : str ):
a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : int = model_class(lowerCamelCase__ )
a__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[int] = [*signature.parameters.keys()]
a__ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _UpperCamelCase( self : int ):
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] ):
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
if not self.model_tester.is_training:
return
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]:
continue
a__ : List[str] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : Tuple = model(**lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : Tuple ):
a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a__ : List[Any] = False
a__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
a__ : Optional[Any] = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : int = model(**lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : List[str] ):
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Dict = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
a__ : str = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _UpperCamelCase( self : Optional[int] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
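# Quick illustration (a minimal sketch; the helper below is hypothetical): for
# BeiT-style vision transformers the token sequence length equals the number of
# image patches plus one [CLS] token, matching the computation in BeitModelTester.
def _expected_beit_seq_length(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) ** 2 + 1


assert _expected_beit_seq_length(224, 16) == 197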
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against released BeiT checkpoints."""
@cached_property
def _UpperCamelCase( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _UpperCamelCase( self : str ):
a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ )
a__ : Optional[Any] = self.default_image_processor
a__ : Dict = prepare_img()
a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ )
# prepare bool_masked_pos
a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ )
a__ : Tuple = outputs.logits
# verify the logits
a__ : List[str] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : Optional[int] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) )
@slow
def _UpperCamelCase( self : Dict ):
a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ )
a__ : int = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Union[str, Any] = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
a__ : Tuple = 281
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : Any ):
a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
lowerCamelCase__ )
a__ : str = self.default_image_processor
a__ : List[str] = prepare_img()
a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Dict = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Optional[int] = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
a__ : Optional[Any] = 2_396
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : int ):
a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : Tuple = model.to(lowerCamelCase__ )
a__ : List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ )
a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : Union[str, Any] = Image.open(ds[0]["file"] )
a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Optional[Any] = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Tuple = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
a__ : Dict = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=lowerCamelCase__ , )
else:
a__ : Dict = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=lowerCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def _UpperCamelCase( self : Tuple ):
a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : List[Any] = model.to(lowerCamelCase__ )
a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ )
a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : str = Image.open(ds[0]["file"] )
a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : List[Any] = model(**lowerCamelCase__ )
a__ : Any = outputs.logits.detach().cpu()
a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] )
a__ : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ )
a__ : Any = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
| 37 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : str = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
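# A minimal sketch of the helper above: rename_key moves a tensor to a new key in place.
_demo_state = {"old.name": torch.zeros(1)}
rename_key(_demo_state, "old.name", "new.name")
assert "new.name" in _demo_state and "old.name" not in _demo_state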
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
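# A minimal sketch of the split performed above: PyTorch's nn.MultiheadAttention packs
# the query/key/value projections into one in_proj_weight of shape
# (3 * hidden_size, hidden_size); with DETR's hidden size of 256, the 256-row slices
# below recover q, k and v in that order.
_demo_in_proj = torch.randn(3 * 256, 256)  # hypothetical stacked projection
_demo_q, _demo_k, _demo_v = _demo_in_proj[:256], _demo_in_proj[256:512], _demo_in_proj[-256:]
assert _demo_q.shape == _demo_k.shape == _demo_v.shape == (256, 256)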
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)
# load original model from torch hub
a__ : str = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(f'''Converting model {model_name}...''' )
a__ : int = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=__a ).eval()
a__ : Dict = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(__a ):
if is_panoptic:
a__ : Dict = "detr." + src
rename_key(__a , __a , __a )
# query, key and value matrices need special treatment
read_in_q_k_v(__a , is_panoptic=__a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a__ : Optional[int] = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
a__ : Any = state_dict.pop(__a )
a__ : List[str] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
a__ : Optional[Any] = state_dict.pop(__a )
a__ : int = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
a__ : Dict = state_dict.pop(__a )
a__ : Optional[int] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
a__ : Tuple = state_dict.pop(__a )
a__ : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
a__ : Optional[Any] = DetrForSegmentation(__a ) if is_panoptic else DetrForObjectDetection(__a )
model.load_state_dict(__a )
model.eval()
# verify our conversion on an image
a__ : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection"
a__ : Optional[int] = DetrImageProcessor(format=__a )
a__ : Union[str, Any] = processor(images=prepare_img() , return_tensors="pt" )
a__ : List[str] = encoding["pixel_values"]
a__ : Tuple = detr(__a )
a__ : List[str] = model(__a )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
processor.save_pretrained(__a )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(f'''nielsr/{model_name}''' )
processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
UpperCamelCase : List[Any] = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 37 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the matching Flax names, reshaping if necessary."""
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
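# A minimal sketch of the conv-kernel layout change above: PyTorch stores conv weights
# as OIHW (out, in, height, width) while Flax expects HWIO, so transpose(2, 3, 1, 0)
# reorders the axes accordingly.
_demo_kernel = jnp.zeros((8, 3, 5, 5))  # hypothetical OIHW kernel, illustration only
assert _demo_kernel.transpose(2, 3, 1, 0).shape == (5, 5, 3, 8)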
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
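# For reference (a minimal sketch): flatten_dict/unflatten_dict round-trip nested Flax
# parameter trees through tuple keys, the representation the conversion loop iterates over.
_demo_params = {"layer": {"kernel": jnp.ones((2, 2))}}
assert set(flatten_dict(_demo_params)) == {("layer", "kernel")}
assert "layer" in unflatten_dict(flatten_dict(_demo_params))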
| 37 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based dummy builder with flat string examples."""
def _UpperCamelCase( self : List[str] ):
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=lowerCamelCase__ , )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCamelCase__ )
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based dummy builder with nested sequence features."""
def _UpperCamelCase( self : Optional[int] ):
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=lowerCamelCase__ , )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCamelCase__ )
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class A__ ( A__ ):
"""simple docstring"""
@require_beam
def _UpperCamelCase( self : Tuple ):
a__ : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a__ : Optional[int] = DummyBeamDataset(cache_dir=lowerCamelCase__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCamelCase__ , builder.name , "default" , "0.0.0" , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
a__ : Union[str, Any] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCamelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCamelCase__ )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCamelCase__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def _UpperCamelCase( self : Any ):
import apache_beam as beam
a__ : str = beam.io.parquetio.WriteToParquet
a__ : List[Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a__ : Any = DummyBeamDataset(cache_dir=lowerCamelCase__ , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
a__ : Optional[Any] = partial(lowerCamelCase__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
lowerCamelCase__ , builder.name , "default" , "0.0.0" , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
lowerCamelCase__ , builder.name , "default" , "0.0.0" , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
a__ : str = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCamelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCamelCase__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(lowerCamelCase__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def _UpperCamelCase( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a__ : List[Any] = DummyBeamDataset(cache_dir=lowerCamelCase__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def _UpperCamelCase( self : Dict ):
a__ : List[Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a__ : Dict = NestedBeamDataset(cache_dir=lowerCamelCase__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCamelCase__ , builder.name , "default" , "0.0.0" , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
a__ : int = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCamelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCamelCase__ )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCamelCase__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
| 37 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
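# Example of the deprecation rewrite above (a minimal sketch): a legacy flag such as
# `--no_cuda` is reported with its double-dash replacement; arg[5:] strips "--no_".
_demo_template = "Arg --no_{0} is no longer used, please use --no-{0} instead."
assert _demo_template.format("--no_cuda"[5:]) == "Arg --no_cuda is no longer used, please use --no-cuda instead."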
if __name__ == "__main__":
main()
| 37 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
a__ : str = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowerCamelCase__ , cache_dir=lowerCamelCase__ )
a__ : Dict = [t[-1] for t in os.walk(os.path.join(lowerCamelCase__ , os.listdir(lowerCamelCase__ )[0] , "snapshots" ) )]
a__ : Tuple = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : str ):
a__, a__ : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowerCamelCase__ )
a__ : Any = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a__ : Dict = jax.random.PRNGKey(0 )
a__ : Tuple = 4
a__ : int = jax.device_count()
a__ : int = num_samples * [prompt]
a__ : Tuple = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
a__ : str = replicate(lowerCamelCase__ )
a__ : Dict = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[int] = shard(lowerCamelCase__ )
a__ : Dict = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
a__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase__ ) == num_samples
def _UpperCamelCase( self : int ):
a__, a__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowerCamelCase__ )
a__ : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a__ : Optional[Any] = jax.random.PRNGKey(0 )
a__ : Optional[Any] = 50
a__ : Any = jax.device_count()
a__ : List[Any] = num_samples * [prompt]
a__ : Dict = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
a__ : int = replicate(lowerCamelCase__ )
a__ : List[Any] = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
a__ : str = shard(lowerCamelCase__ )
a__ : Optional[Any] = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def _UpperCamelCase( self : Optional[Any] ):
a__, a__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase__ )
a__ : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a__ : Optional[int] = jax.random.PRNGKey(0 )
a__ : int = 50
a__ : List[Any] = jax.device_count()
a__ : Optional[int] = num_samples * [prompt]
a__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
a__ : Dict = replicate(lowerCamelCase__ )
a__ : Dict = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[int] = shard(lowerCamelCase__ )
a__ : Tuple = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def _UpperCamelCase( self : Optional[int] ):
a__, a__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
a__ : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a__ : List[str] = jax.random.PRNGKey(0 )
a__ : Tuple = 50
a__ : Optional[Any] = jax.device_count()
a__ : Optional[int] = num_samples * [prompt]
a__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
a__ : int = replicate(lowerCamelCase__ )
a__ : Union[str, Any] = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[int] = shard(lowerCamelCase__ )
a__ : Union[str, Any] = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng across all available devices
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1
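
    # Generate once with standard attention and once with memory-efficient attention, then compare a pixel slice.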
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # Rerun the same generation with memory-efficient attention enabled
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)

        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the
        # `sum` over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
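
# Evaluation script for RAG models: reports exact match / F1 in "e2e" mode and precision@k in "retrieval" mode.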
import argparse
import ast
import logging
import os
import sys

import pandas as pd
import torch
from tqdm import tqdm

from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging


sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

transformers_logging.set_verbosity_info()
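

# Infer the RAG variant from the checkpoint name when --model_type is not passed explicitly.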
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
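

# SQuAD-style scoring: evaluate the prediction against every gold answer and keep the best score.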
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
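

# Compute corpus-level exact match and F1 between a predictions file and the gold answers.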
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
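

# Retrieval metric: fraction of the top-k retrieved document titles that appear in the gold provenance.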
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
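

# Run only the retriever and return tab-separated titles of the retrieved documents for each question.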
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
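

# Full end-to-end generation: encode the questions, generate answers with the RAG model, and decode them.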
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens; don't allow it to generate more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
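

# Command-line interface; all paths, modes, and decoding hyperparameters are configured here.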
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number",
    )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true",
    )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
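

# Entry point: build the model (RAG + retriever, or plain BART), run batched evaluation, then score predictions.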
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)