| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 - 54.5k chars) | int64 (0 - 371) | string (87 - 49.2k chars) | int64 (0 - 349) | int64 (0 - 1) |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = '''RegNetConfig'''
# Base docstring
_CHECKPOINT_FOR_DOC = '''facebook/regnet-y-040'''
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''facebook/regnet-y-040'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _a , _a = 3 , _a = 1 , _a = 1 , _a = "relu" , **_a , ) -> str:
super().__init__(**_a )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_a : Optional[Any] = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
_a : Tuple = tf.keras.layers.Conv2D(
filters=_a , kernel_size=_a , strides=_a , padding='''VALID''' , groups=_a , use_bias=_a , name='''convolution''' , )
_a : Optional[Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
_a : List[str] = ACT2FN[activation] if activation is not None else tf.identity
def __lowercase ( self , _a ) -> List[Any]:
_a : List[Any] = self.convolution(self.padding(_a ) )
_a : Tuple = self.normalization(_a )
_a : Union[str, Any] = self.activation(_a )
return hidden_state
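# A minimal sketch (TF 2.x; shapes illustrative, not from the original file) of the
# pad-then-convolve pattern above: ZeroPadding2D(kernel_size // 2) followed by a VALID
# convolution reproduces SAME padding for odd kernel sizes on NHWC inputs.
# x = tf.random.normal((1, 32, 32, 8))
# pad = tf.keras.layers.ZeroPadding2D(padding=3 // 2)
# conv = tf.keras.layers.Conv2D(filters=16, kernel_size=3, strides=1, padding='''VALID''')
# conv(pad(x)).shape  # TensorShape([1, 32, 32, 16]) -- same spatial size as the input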
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _a , **_a ) -> Tuple:
super().__init__(**_a )
_a : List[str] = config.num_channels
_a : Dict = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def __lowercase ( self , _a ) -> Union[str, Any]:
_a : List[str] = shape_list(_a )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_a : Dict = tf.transpose(_a , perm=(0, 2, 3, 1) )
_a : List[str] = self.embedder(_a )
return hidden_state
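# Layout sketch (an illustration, not part of the original file): Conv2D on CPU only
# supports channels-last, hence the NCHW -> NHWC permutation above.
# x = tf.random.normal((1, 3, 224, 224))          # NCHW, as the image processor emits
# tf.transpose(x, perm=(0, 2, 3, 1)).shape        # TensorShape([1, 224, 224, 3]), NHWC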
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _a , _a = 2 , **_a ) -> int:
super().__init__(**_a )
_a : Optional[Any] = tf.keras.layers.Conv2D(
filters=_a , kernel_size=1 , strides=_a , use_bias=_a , name='''convolution''' )
_a : str = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def __lowercase ( self , _a , _a = False ) -> tf.Tensor:
return self.normalization(self.convolution(_a ) , training=_a )
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _a , _a , **_a ) -> int:
super().__init__(**_a )
_a : List[Any] = tf.keras.layers.GlobalAveragePooling2D(keepdims=_a , name='''pooler''' )
_a : Optional[int] = [
tf.keras.layers.Conv2D(filters=_a , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.Conv2D(filters=_a , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def __lowercase ( self , _a ) -> List[Any]:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_a : str = self.pooler(_a )
for layer_module in self.attention:
_a : Dict = layer_module(_a )
_a : Optional[Any] = hidden_state * pooled
return hidden_state
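# Squeeze-and-excitation in brief (a sketch with illustrative sizes): average-pool the
# NHWC features to (batch, 1, 1, C), squeeze through a ReLU 1x1 conv, expand back with
# a sigmoid 1x1 conv, and rescale the input channel-wise by broadcasting over H and W.
# x = tf.random.normal((1, 7, 7, 64))
# pooled = tf.keras.layers.GlobalAveragePooling2D(keepdims=True)(x)        # (1, 1, 1, 64)
# squeezed = tf.keras.layers.Conv2D(16, 1, activation='''relu''')(pooled)  # (1, 1, 1, 16)
# gate = tf.keras.layers.Conv2D(64, 1, activation='''sigmoid''')(squeezed) # (1, 1, 1, 64)
# (x * gate).shape                                                         # (1, 7, 7, 64)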
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _a , _a , _a , _a = 1 , **_a ) -> Union[str, Any]:
super().__init__(**_a )
_a : Union[str, Any] = in_channels != out_channels or stride != 1
_a : str = max(1 , out_channels // config.groups_width )
_a : Tuple = (
TFRegNetShortCut(_a , stride=_a , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_a : Tuple = [
TFRegNetConvLayer(_a , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_a , stride=_a , groups=_a , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(_a , kernel_size=1 , activation=_a , name='''layer.2''' ),
]
_a : int = ACT2FN[config.hidden_act]
def __lowercase ( self , _a ) -> Any:
_a : Tuple = hidden_state
for layer_module in self.layers:
_a : List[str] = layer_module(_a )
_a : Optional[int] = self.shortcut(_a )
hidden_state += residual
_a : Union[str, Any] = self.activation(_a )
return hidden_state
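# The forward pass above is the usual residual pattern: out = act(F(x) + shortcut(x)),
# where the shortcut is a strided 1x1 conv + batch norm when the spatial size or channel
# count changes, and the identity otherwise, so the addition always shape-checks. Sketch:
# residual = shortcut(x)
# for layer in layers:
#     x = layer(x)
# out = activation(x + residual)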
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _a , _a , _a , _a = 1 , **_a ) -> List[str]:
super().__init__(**_a )
_a : List[str] = in_channels != out_channels or stride != 1
_a : Union[str, Any] = max(1 , out_channels // config.groups_width )
_a : int = (
TFRegNetShortCut(_a , stride=_a , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
_a : Any = [
TFRegNetConvLayer(_a , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_a , stride=_a , groups=_a , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(_a , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(_a , kernel_size=1 , activation=_a , name='''layer.3''' ),
]
_a : Union[str, Any] = ACT2FN[config.hidden_act]
def __lowercase ( self , _a ) -> List[str]:
_a : Tuple = hidden_state
for layer_module in self.layers:
_a : str = layer_module(_a )
_a : int = self.shortcut(_a )
hidden_state += residual
_a : List[Any] = self.activation(_a )
return hidden_state
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _a , _a , _a , _a = 2 , _a = 2 , **_a ) -> Union[str, Any]:
super().__init__(**_a )
_a : Union[str, Any] = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
_a : Any = [
# downsampling is done in the first layer with stride of 2
layer(_a , _a , _a , stride=_a , name='''layers.0''' ),
*[layer(_a , _a , _a , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __lowercase ( self , _a ) -> Any:
for layer_module in self.layers:
_a : Tuple = layer_module(_a )
return hidden_state
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _a , **_a ) -> List[str]:
super().__init__(**_a )
_a : int = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_a , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
_a : Any = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_a , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_a , _a , _a , depth=_a , name=F"""stages.{i+1}""" ) )
def __lowercase ( self , _a , _a = False , _a = True ) -> TFBaseModelOutputWithNoAttention:
_a : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_a : Optional[Any] = hidden_states + (hidden_state,)
_a : Union[str, Any] = stage_module(_a )
if output_hidden_states:
_a : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_a , hidden_states=_a )
@keras_serializable
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = RegNetConfig
def __init__( self , _a , **_a ) -> Optional[int]:
super().__init__(**_a )
_a : List[Any] = config
_a : Union[str, Any] = TFRegNetEmbeddings(_a , name='''embedder''' )
_a : List[str] = TFRegNetEncoder(_a , name='''encoder''' )
_a : Optional[int] = tf.keras.layers.GlobalAveragePooling2D(keepdims=_a , name='''pooler''' )
@unpack_inputs
def __lowercase ( self , _a , _a = None , _a = None , _a = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
_a : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_a : Union[str, Any] = self.embedder(_a , training=_a )
_a : Any = self.encoder(
_a , output_hidden_states=_a , return_dict=_a , training=_a )
_a : Tuple = encoder_outputs[0]
_a : int = self.pooler(_a )
# Change to NCHW output format to have uniformity in the modules
_a : Union[str, Any] = tf.transpose(_a , perm=(0, 3, 1, 2) )
_a : Optional[int] = tf.transpose(_a , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_a : int = tuple([tf.transpose(_a , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a , pooler_output=_a , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = RegNetConfig
UpperCAmelCase__ : int = "regnet"
UpperCAmelCase__ : Dict = "pixel_values"
@property
def __lowercase ( self ) -> Tuple:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
REGNET_START_DOCSTRING = R'''
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matters related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , REGNET_START_DOCSTRING , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , _a , *_a , **_a ) -> Any:
super().__init__(_a , *_a , **_a )
_a : Optional[Any] = TFRegNetMainLayer(_a , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowercase ( self , _a , _a = None , _a = None , _a=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
_a : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
_a : Tuple = self.regnet(
pixel_values=_a , output_hidden_states=_a , return_dict=_a , training=_a , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , REGNET_START_DOCSTRING , )
class UpperCAmelCase_ ( __lowercase , __lowercase ):
"""simple docstring"""
def __init__( self , _a , *_a , **_a ) -> Tuple:
super().__init__(_a , *_a , **_a )
_a : str = config.num_labels
_a : int = TFRegNetMainLayer(_a , name='''regnet''' )
# classification head
_a : Optional[int] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowercase ( self , _a = None , _a = None , _a = None , _a = None , _a=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
_a : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
_a : List[str] = self.regnet(
_a , output_hidden_states=_a , return_dict=_a , training=_a )
_a : Dict = outputs.pooler_output if return_dict else outputs[1]
_a : Dict = self.classifier[0](_a )
_a : List[str] = self.classifier[1](_a )
_a : List[Any] = None if labels is None else self.hf_compute_loss(labels=_a , logits=_a )
if not return_dict:
_a : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_a , logits=_a , hidden_states=outputs.hidden_states )
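# A minimal usage sketch for the classification model above (assumes the released
# `transformers` TF RegNet API and access to the facebook/regnet-y-040 checkpoint):
# from transformers import AutoImageProcessor, TFRegNetForImageClassification
# processor = AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''')
# model = TFRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
# inputs = processor(images=image, return_tensors='''tf''')    # `image` is a PIL image
# logits = model(**inputs).logits                              # shape (1, num_labels)
# print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])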
| 15 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"]
UpperCAmelCase__ : str = "ViltImageProcessor"
UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , FutureWarning , )
feature_extractor = kwargs.pop('''feature_extractor''' )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(image_processor , tokenizer )
self.current_processor = self.image_processor
def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
encoding = self.tokenizer(
text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
# add pixel_values + pixel_mask
encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
encoding.update(encoding_image_processor )
return encoding
def __lowercase ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> str:
return self.tokenizer.decode(*_a , **_a )
@property
def __lowercase ( self ) -> Optional[int]:
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowercase ( self ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
return self.image_processor_class
@property
def __lowercase ( self ) -> Any:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
return self.image_processor
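# A minimal usage sketch (assumes the released ViLT checkpoint named below):
# from transformers import ViltProcessor
# processor = ViltProcessor.from_pretrained('''dandelin/vilt-b32-finetuned-vqa''')
# encoding = processor(images=image, text='''How many cats are there?''', return_tensors='''pt''')
# `encoding` then holds input_ids/attention_mask from the tokenizer plus
# pixel_values/pixel_mask contributed by the image processor, as merged in __call__ above.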
| 15 | 1 |
def and_gate(input_1 : int , input_2 : int ) -> int:
"""Logical AND gate: returns 1 only if both inputs are 1."""
return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate() -> None:
"""Checks and_gate against its full truth table."""
assert and_gate(0 ,0 ) == 0
assert and_gate(0 ,1 ) == 0
assert and_gate(1 ,0 ) == 0
assert and_gate(1 ,1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 15 |
from math import ceil
def solution(n : int = 1_001 ) -> int:
"""Returns the sum of the numbers on the diagonals of an n x n number spiral (Project Euler 28)."""
total = 1
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
odd = 2 * i + 1
even = 2 * i
total = total + 4 * odd**2 - 6 * even
return total
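# Why total += 4 * odd**2 - 6 * even: ring i of the spiral has side length odd = 2*i + 1,
# and its four corners are odd**2, odd**2 - 2*i, odd**2 - 4*i and odd**2 - 6*i, which sum
# to 4 * odd**2 - 12*i = 4 * odd**2 - 6 * even with even = 2*i.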
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 15 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING = None
class UpperCAmelCase_ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self , features=None , device=None , **jnp_array_kwargs ) -> None:
super().__init__(features=features )
import jax
from jaxlib.xla_client import Device
if isinstance(device , Device ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` """
'''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''' )
self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
DEVICE_MAPPING = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
self.device = str(jax.devices()[0] )
self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(device ): device for device in jax.devices()}
def _consolidate( self , column ):
import jax
import jax.numpy as jnp
if isinstance(column , list ) and column:
if all(
isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(column , axis=0 )
return column
def _tensorize( self , value ):
import jax
import jax.numpy as jnp
if isinstance(value , (str, bytes, type(None )) ):
return value
elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
default_dtype = {}
if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
default_dtype = {'''dtype''': jnp.int64}
else:
default_dtype = {'''dtype''': jnp.int32}
elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
default_dtype = {'''dtype''': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value , PIL.Image.Image ):
value = np.asarray(value )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
DEVICE_MAPPING = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
def _recursive_tensorize( self , data_struct ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(data_struct , '''__array__''' ) and not isinstance(data_struct , jax.Array ):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(data_struct , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(data_struct )
def recursive_tensorize( self , data_struct ):
return map_nested(self._recursive_tensorize , data_struct , map_list=False )
def format_row( self , pa_table ) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table )
row = self.python_features_decoder.decode_row(row )
return self.recursive_tensorize(row )
def __lowercase ( self , _a ) -> "jax.Array":
_a : int = self.numpy_arrow_extractor().extract_column(_a )
_a : Dict = self.python_features_decoder.decode_column(_a , pa_table.column_names[0] )
_a : Optional[Any] = self.recursive_tensorize(_a )
_a : Dict = self._consolidate(_a )
return column
def format_batch( self , pa_table ) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table )
batch = self.python_features_decoder.decode_batch(batch )
batch = self.recursive_tensorize(batch )
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name] )
return batch
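# Precision sketch (assumes `jax` and `numpy` are installed) of the behaviour the
# dtype branches above account for: without the x64 flag, JAX silently downcasts
# 64-bit integer input.
# import numpy as np
# import jax.numpy as jnp
# jnp.array(np.arange(3, dtype=np.int64)).dtype   # int32, unless
# # jax.config.update('''jax_enable_x64''', True) was called before creating arrays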
| 15 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box , width , height ):
"""Normalizes a pixel-space box (left, top, right, bottom) to the 0-1000 scale."""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
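# Worked example for normalize_box: on a 500x400 (width x height) image, the pixel box
# [50, 100, 150, 200] (left, top, right, bottom) maps to the 0-1000 scale LayoutLM expects:
# [1_000*50//500, 1_000*100//400, 1_000*150//500, 1_000*200//400] == [100, 250, 300, 500]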
def apply_tesseract(image : np.ndarray , lang : Optional[str] , tesseract_config : Optional[str] ):
"""Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
pil_image = to_pil_image(image )
image_width , image_height = pil_image.size
data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
words , left , top , width , height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
actual_boxes = []
for x, y, w, h in zip(left , top , width , height ):
actual_box = [x, y, x + w, y + h]
actual_boxes.append(actual_box )
# finally, normalize the bounding boxes
normalized_boxes = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(box , image_width , image_height ) )
assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
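# A minimal usage sketch (assumes the released LayoutLMv2 image processor, which this
# preprocessing mirrors, and a local Tesseract install when apply_ocr=True):
# from transformers import LayoutLMv2ImageProcessor
# processor = LayoutLMv2ImageProcessor(apply_ocr=True)
# encoding = processor(image, return_tensors='''pt''')
# encoding holds pixel_values plus the OCR-recovered `words` and `boxes` lists.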
| 15 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : Any = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_a : str = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __lowercase ( self , **_a ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
_a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> str:
_a : List[str] = self.get_tokenizer()
_a : Tuple = self.get_image_processor()
_a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Dict:
_a : List[str] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Dict = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = self.prepare_image_inputs()
_a : List[Any] = image_processor(_a , return_tensors='''np''' )
_a : Dict = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Tuple = '''lower newer'''
_a : int = processor(text=_a )
_a : str = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> List[Any]:
_a : Any = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Union[str, Any] = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : int = processor.batch_decode(_a )
_a : int = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Tuple = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Optional[int] = '''lower newer'''
_a : Dict = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
"""Entry point for the `accelerate` command-line interface."""
parser = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=False )
subparsers = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=subparsers )
env_command_parser(subparsers=subparsers )
launch_command_parser(subparsers=subparsers )
tpu_command_parser(subparsers=subparsers )
test_command_parser(subparsers=subparsers )
# Let's go
args = parser.parse_args()
if not hasattr(args ,'''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(args )
if __name__ == "__main__":
main()
| 15 | 1 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
a__ = logging.getLogger(__name__)
a__ = '''Hello world! cécé herlolip'''
BertAbsConfig = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints , dump_path ) -> None:
"""simple docstring"""
_a : List[str] = BertAbsConfig(
temp_dir='''.''' ,finetune_bert=__a ,large=__a ,share_emb=__a ,use_bert_emb=__a ,encoder='''bert''' ,max_pos=512 ,enc_layers=6 ,enc_hidden_size=512 ,enc_heads=8 ,enc_ff_size=512 ,enc_dropout=0.2 ,dec_layers=6 ,dec_hidden_size=768 ,dec_heads=8 ,dec_ff_size=2_048 ,dec_dropout=0.2 ,)
_a : int = torch.load(__a ,lambda __a ,__a : storage )
_a : Union[str, Any] = AbsSummarizer(__a ,torch.device('''cpu''' ) ,__a )
original.eval()
_a : List[str] = BertAbsSummarizer(__a ,torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
_a : Optional[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
_a : int = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__a )) )
_a : List[str] = torch.tensor(__a ).unsqueeze(0 )
_a : Union[str, Any] = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__a )) )
_a : str = torch.tensor(__a ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
_a : Dict = encoder_input_ids
_a : Union[str, Any] = decoder_input_ids
_a : List[Any] = None
_a : int = None
_a : Dict = None
_a : Optional[int] = None
_a : str = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
_a : Optional[Any] = original(__a ,__a ,__a ,__a ,__a ,__a ,__a )[0]
_a : Optional[int] = original.generator(__a )
_a : Tuple = new_model(
__a ,__a ,__a ,__a ,__a )[0]
_a : int = new_model.generator(__a )
_a : Union[str, Any] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('''Maximum absolute difference between model outputs: {:.2f}'''.format(__a ) )
_a : List[str] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(__a ) )
_a : List[str] = torch.allclose(__a ,__a ,atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() ,'''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 15 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
"""Creates a random float32 tensor (as nested Python lists) of the given shape."""
if rng is None:
rng = global_rng
values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : str = batch_size
_a : List[str] = min_seq_length
_a : str = max_seq_length
_a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] = spectrogram_length
_a : List[str] = feature_size
_a : List[Any] = num_audio_channels
_a : Tuple = hop_length
_a : Optional[int] = chunk_length
_a : int = sampling_rate
def __lowercase ( self ) -> Union[str, Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = TvltFeatureExtractor
def __lowercase ( self ) -> Dict:
_a : List[str] = TvltFeatureExtractionTester(self )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_a : Dict = self.feature_extraction_class.from_pretrained(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Union[str, Any] = feat_extract_second.to_dict()
_a : Any = dict_first.pop('''mel_filters''' )
_a : int = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
_a : List[str] = self.feature_extraction_class.from_json_file(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Dict = feat_extract_second.to_dict()
_a : str = dict_first.pop('''mel_filters''' )
_a : str = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize feature_extractor
_a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a : Union[str, Any] = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : int = np.asarray(_a )
_a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowercase ( self ) -> int:
_a : Union[str, Any] = self._load_datasamples(1 )
_a : int = TvltFeatureExtractor()
_a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 15 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=9_9 , _a=3_2 , _a=5 , _a=4 , _a=6_4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=1_6 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , _a=2 , _a=2 , _a=2 , _a=2 , _a=4 , _a=1 , ) -> str:
_a : Optional[Any] = parent
_a : Tuple = batch_size
_a : Any = seq_length
_a : Optional[Any] = is_training
_a : List[Any] = use_input_mask
_a : str = use_token_type_ids
_a : Dict = use_labels
_a : Dict = vocab_size
_a : Any = hidden_size
_a : List[Any] = num_hidden_layers
_a : Any = num_attention_heads
_a : Tuple = intermediate_size
_a : Dict = hidden_act
_a : str = hidden_dropout_prob
_a : str = attention_probs_dropout_prob
_a : Optional[int] = max_position_embeddings
_a : Any = type_vocab_size
_a : Union[str, Any] = type_sequence_label_size
_a : str = initializer_range
_a : Dict = num_labels
_a : Optional[Any] = num_choices
_a : Optional[int] = scope
_a : int = q_groups
_a : List[str] = k_groups
_a : str = v_groups
_a : str = post_attention_groups
_a : Dict = intermediate_groups
_a : Tuple = output_groups
def __lowercase ( self ) -> Union[str, Any]:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self ) -> str:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> str:
_a : Tuple = SqueezeBertModel(config=_a )
model.to(_a )
model.eval()
_a : Union[str, Any] = model(_a , _a )
_a : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Any:
_a : int = SqueezeBertForMaskedLM(config=_a )
model.to(_a )
model.eval()
_a : List[Any] = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Tuple:
_a : List[str] = SqueezeBertForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_a : Union[str, Any] = model(
_a , attention_mask=_a , start_positions=_a , end_positions=_a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
_a : Dict = self.num_labels
_a : str = SqueezeBertForSequenceClassification(_a )
model.to(_a )
model.eval()
_a : Union[str, Any] = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Optional[Any]:
_a : int = self.num_labels
_a : Any = SqueezeBertForTokenClassification(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
_a : Dict = self.num_choices
_a : List[Any] = SqueezeBertForMultipleChoice(config=_a )
model.to(_a )
model.eval()
_a : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Optional[Any] = model(
_a , attention_mask=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowercase ( self ) -> str:
_a : Optional[Any] = self.prepare_config_and_inputs()
(config , input_ids , input_mask , sequence_labels , token_labels , choice_labels) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCAmelCase__ : Union[str, Any] = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Optional[Any] = False
def __lowercase ( self ) -> Optional[Any]:
_a : Union[str, Any] = SqueezeBertModelTester(self )
_a : List[Any] = ConfigTester(self , config_class=_a , dim=3_7 )
def __lowercase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __lowercase ( self ) -> Tuple:
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_a )
def __lowercase ( self ) -> Optional[Any]:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_a )
def __lowercase ( self ) -> Union[str, Any]:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_a )
def __lowercase ( self ) -> str:
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_a )
def __lowercase ( self ) -> Optional[Any]:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_a )
@slow
def __lowercase ( self ) -> List[str]:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Optional[int] = SqueezeBertModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self ) -> List[str]:
_a : int = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
_a : Optional[int] = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
_a : List[str] = model(_a )[0]
_a : Any = torch.Size((1, 3) )
self.assertEqual(output.shape , _a )
_a : Optional[int] = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_a , _a , atol=1e-4 ) )
| 15 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
@add_end_docstrings(
__lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self , _a ) -> np.ndarray:
if self.framework == "tf":
_a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __lowercase ( self , _a ) -> np.ndarray:
_a : int = self.get_masked_index(_a )
_a : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __lowercase ( self , _a ) -> Optional[int]:
if isinstance(_a , _a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]:
if return_tensors is None:
_a : Union[str, Any] = self.framework
_a : str = self.tokenizer(_a , return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[str] = self.model(**_a )
_a : Any = model_inputs['''input_ids''']
return model_outputs
def __lowercase ( self , _a , _a=5 , _a=None ) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_a : List[Any] = target_ids.shape[0]
_a : Any = model_outputs['''input_ids'''][0]
_a : List[str] = model_outputs['''logits''']
if self.framework == "tf":
_a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_a : List[str] = outputs.numpy()
_a : Dict = outputs[0, masked_index, :]
_a : str = stable_softmax(_a , axis=-1 )
if target_ids is not None:
_a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) )
_a : Union[str, Any] = tf.expand_dims(_a , 0 )
_a : Optional[int] = tf.math.top_k(_a , k=_a )
_a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
_a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_a : List[str] = outputs[0, masked_index, :]
_a : List[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_a : List[Any] = probs[..., target_ids]
_a , _a : Optional[Any] = probs.topk(_a )
_a : Dict = []
_a : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_a : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_a : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
_a : Tuple = target_ids[p].tolist()
_a : List[str] = p
# Filter padding out:
_a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a )
_a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
def __lowercase ( self , _a , _a=None ) -> Dict:
if isinstance(_a , _a ):
_a : Tuple = [targets]
try:
_a : int = self.tokenizer.get_vocab()
except Exception:
_a : Any = {}
_a : List[Any] = []
for target in targets:
_a : List[Any] = vocab.get(_a , _a )
if id_ is None:
_a : Tuple = self.tokenizer(
_a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids''']
if len(_a ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
_a : Tuple = input_ids[0]
                # XXX: If users hit this code path, tokenization
                # becomes pretty slow, so let's make sure the
                # warning enables them to fix their input and
                # get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
_a : List[str] = list(set(_a ) )
if len(_a ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
_a : int = np.array(_a )
return target_ids
def __lowercase ( self , _a=None , _a=None ) -> Tuple:
_a : str = {}
if targets is not None:
_a : List[Any] = self.get_target_ids(_a , _a )
_a : Optional[Any] = target_ids
if top_k is not None:
_a : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _a , *_a , **_a ) -> int:
_a : Optional[Any] = super().__call__(_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
| 15 | 1 |
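# --- Illustrative sketch (not part of the pipeline above) ---
# A minimal numpy version of the fill-mask postprocessing step: softmax the
# logits at the masked position, then take the top-k token ids by probability.
# All names and values below are made up for illustration.
import numpy as np

def top_k_mask_predictions(logits_at_mask: np.ndarray, k: int = 5):
    shifted = logits_at_mask - logits_at_mask.max()  # numerically stable softmax
    probs = np.exp(shifted) / np.exp(shifted).sum()
    top_ids = np.argsort(probs)[::-1][:k]  # k largest probabilities, highest first
    return [(int(i), float(probs[i])) for i in top_ids]

print(top_k_mask_predictions(np.array([0.1, 0.3, 2.5, 0.0, -1.0, 0.9]), k=3))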
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
a__ = ['''small''', '''medium''', '''large''']
a__ = '''lm_head.decoder.weight'''
a__ = '''lm_head.weight'''
def __UpperCAmelCase ( __a : str ,__a : str ) -> List[str]:
"""simple docstring"""
_a : Any = torch.load(__a )
_a : List[str] = d.pop(__a )
os.makedirs(__a ,exist_ok=__a )
torch.save(__a ,os.path.join(__a ,__a ) )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
a__ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
a__ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
a__ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 15 |
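# --- Illustrative sketch ---
# The conversion above boils down to renaming one key in a checkpoint's state
# dict. A minimal self-contained version (the file names in the usage comment
# are hypothetical):
import torch

def rename_state_dict_key(path_in: str, path_out: str, old_key: str, new_key: str) -> None:
    state_dict = torch.load(path_in, map_location="cpu")
    state_dict[new_key] = state_dict.pop(old_key)  # raises KeyError if old_key is absent
    torch.save(state_dict, path_out)

# rename_state_dict_key("small_ft.pkl", "pytorch_model.bin",
#                       "lm_head.decoder.weight", "lm_head.weight")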
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
a__ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
a__ = logging.getLogger()
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_a : Any = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_a : Dict = parser.parse_args()
return args.f
def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any:
"""simple docstring"""
_a : Any = os.path.join(__a ,F"""{split}_results.json""" )
if os.path.exists(__a ):
with open(__a ,'''r''' ) as f:
return json.load(__a )
raise ValueError(F"""can't find {path}""" )
a__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
_a : Any = self.get_auto_remove_tmp_dir()
_a : Optional[Any] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_glue.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __lowercase ( self ) -> Dict:
_a : Tuple = self.get_auto_remove_tmp_dir()
_a : Tuple = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_clm_flax.main()
_a : List[str] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
def __lowercase ( self ) -> Optional[int]:
_a : str = self.get_auto_remove_tmp_dir()
_a : Optional[int] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(_a , '''argv''' , _a ):
run_summarization_flax.main()
_a : Optional[int] = get_results(_a , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __lowercase ( self ) -> Tuple:
_a : List[str] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = self.get_auto_remove_tmp_dir()
_a : int = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_ta_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __lowercase ( self ) -> Optional[Any]:
        # with so little data, distributed training needs more epochs to get the score on par with 0/1 GPU
_a : Any = 7 if get_gpu_count() > 1 else 2
_a : List[Any] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_ner.main()
_a : Dict = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __lowercase ( self ) -> Any:
_a : Optional[int] = self.get_auto_remove_tmp_dir()
_a : Union[str, Any] = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_qa.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
| 15 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : nn.ModuleList ,__a : nn.ModuleList ,__a : List[int] ) -> None:
"""simple docstring"""
_a : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__a ) == len(__a ), F"""{len(__a )} != {len(__a )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
a__ = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
a__ = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> int:
"""simple docstring"""
try:
_a : int = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
F""" {n_student}""" )
return list(range(__a ) )
def __UpperCAmelCase ( __a : Dict ,__a : List[str] ) -> List[int]:
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(__a ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __UpperCAmelCase ( __a : Union[str, PreTrainedModel] ,__a : Union[str, Path] = "student" ,__a : Union[int, None] = None ,__a : Union[int, None] = None ,__a : Any=False ,__a : Any=None ,__a : int=None ,**__a : Optional[int] ,) -> Tuple[PreTrainedModel, List[int], List[int]]:
"""simple docstring"""
    _a : Dict = '''encoder_layers and decoder_layers cannot both be None -- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(__a ,__a ):
AutoTokenizer.from_pretrained(__a ).save_pretrained(__a ) # purely for convenience
_a : List[str] = AutoModelForSeqaSeqLM.from_pretrained(__a ).eval()
else:
assert isinstance(__a ,__a ), F"""teacher must be a model or string got type {type(__a )}"""
_a : Dict = teacher.config.to_diff_dict()
try:
_a , _a : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_a : Union[str, Any] = teacher_e
if d is None:
_a : List[Any] = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config ,'''num_encoder_layers''' ):
_a , _a : List[str] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_a , _a : Optional[Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_a : Optional[int] = teacher_e
if d is None:
_a : Optional[Any] = teacher_d
if hasattr(teacher.config ,'''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__a )
# Copy weights
_a : List[str] = teacher.config_class(**__a )
_a : Dict = AutoModelForSeqaSeqLM.from_config(__a )
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
_a : str = student.load_state_dict(teacher.state_dict() ,strict=__a )
    assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_a , _a : List[str] = list(range(__a ) ), list(range(__a ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(__a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_a : List[int] = pick_layers_to_copy(__a ,__a )
if d_layers_to_copy is None:
_a : List[int] = pick_layers_to_copy(__a ,__a )
try:
if hasattr(
__a ,'''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers ,student.prophetnet.encoder.layers ,__a )
copy_layers(teacher.prophetnet.decoder.layers ,student.prophetnet.decoder.layers ,__a )
else:
copy_layers(teacher.model.encoder.layers ,student.model.encoder.layers ,__a )
copy_layers(teacher.model.decoder.layers ,student.model.decoder.layers ,__a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block ,student.encoder.block ,__a )
copy_layers(teacher.decoder.block ,student.decoder.block ,__a )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
_a : List[Any] = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(__a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 15 |
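# --- Illustrative sketch ---
# The hardcoded LAYERS_TO_COPY tables above mostly follow one rule: spread the
# student's layers evenly over the teacher's, keeping the first and the last
# layer. A generic approximation of that rule (not the exact tables):
def spread_layers(n_teacher: int, n_student: int) -> list:
    if n_student == 1:
        return [0]
    step = (n_teacher - 1) / (n_student - 1)
    return [round(i * step) for i in range(n_student)]

print(spread_layers(12, 6))  # [0, 2, 4, 7, 9, 11] -- matches the table
print(spread_layers(12, 4))  # [0, 4, 7, 11] -- the table uses [0, 4, 8, 11]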
import argparse
import os
import re
import packaging.version
a__ = '''examples/'''
a__ = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a__ = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
a__ = '''README.md'''
def __UpperCAmelCase ( __a : List[str] ,__a : int ,__a : Optional[Any] ) -> int:
"""simple docstring"""
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Tuple = f.read()
_a , _a : str = REPLACE_PATTERNS[pattern]
_a : List[str] = replace.replace('''VERSION''' ,__a )
_a : List[Any] = re_pattern.sub(__a ,__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.write(__a )
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(__a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__a ,__a ) ,__a ,pattern='''examples''' )
def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__a ,__a ,__a )
if not patch:
update_version_in_examples(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_a : Optional[Any] = '''🤗 Transformers currently provides the following architectures'''
_a : str = '''1. Want to contribute a new model?'''
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Optional[int] = f.readlines()
# Find the start of the list.
_a : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_a : List[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
_a : Tuple = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,)
index += 1
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.writelines(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with open(REPLACE_FILES['''init'''] ,'''r''' ) as f:
_a : Optional[Any] = f.read()
_a : Optional[Any] = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def __UpperCAmelCase ( __a : Dict=False ) -> str:
"""simple docstring"""
_a : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
_a : List[Any] = default_version.base_version
elif patch:
_a : str = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_a : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_a : Dict = input(F"""Which version are you releasing? [{default_version}]""" )
if len(__a ) == 0:
_a : int = default_version
print(F"""Updating version to {version}.""" )
global_version_update(__a ,patch=__a )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
_a : str = get_version()
_a : int = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_a : List[Any] = current_version.base_version
# Check with the user we got that right.
_a : Union[str, Any] = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(__a ) == 0:
_a : List[str] = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(__a )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 15 | 1 |
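# --- Illustrative sketch ---
# The version arithmetic from pre_release_work/post_release_work, isolated:
# a dev release like "4.31.0.dev0" becomes "4.31.0" at release time, and the
# next dev cycle is "4.32.0.dev0"; a patch bumps only the micro component.
# The version strings are examples, not real release numbers.
from packaging.version import parse

def next_versions(current: str, patch: bool = False):
    v = parse(current)
    if v.is_devrelease:
        release = v.base_version
    elif patch:
        release = f"{v.major}.{v.minor}.{v.micro + 1}"
    else:
        release = f"{v.major}.{v.minor + 1}.0"
    r = parse(release)
    return release, f"{r.major}.{r.minor + 1}.0.dev0"

print(next_versions("4.31.0.dev0"))         # ('4.31.0', '4.32.0.dev0')
print(next_versions("4.31.0", patch=True))  # ('4.31.1', '4.32.0.dev0')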
from __future__ import annotations
from collections.abc import Generator
def __UpperCAmelCase ( ) -> Generator[int, None, None]:
"""simple docstring"""
_a : dict[int, int] = {}
_a : int = 2
while True:
        _a : Union[str, Any] = factor_map.pop(__a ,None )
if factor:
_a : str = factor + prime
while x in factor_map:
x += factor
_a : Any = factor
else:
_a : Tuple = prime
yield prime
prime += 1
def __UpperCAmelCase ( __a : float = 1E10 ) -> int:
"""simple docstring"""
_a : Optional[int] = sieve()
_a : List[str] = 1
while True:
_a : Optional[int] = next(__a )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime as the remainder will be 2.
next(__a )
n += 2
if __name__ == "__main__":
print(solution())
| 15 |
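# --- Illustrative note ---
# Why the solution above only tracks 2 * prime * n: expanding binomially,
# (p - 1)**n + (p + 1)**n mod p**2 collapses to 2 for even n and to
# 2 * n * p for odd n, so only odd indices matter and the remainder is
# simply 2*n*p whenever that is below p**2. A quick numeric check:
for p, n in [(5, 3), (7, 5), (11, 7), (13, 9)]:
    lhs = ((p - 1) ** n + (p + 1) ** n) % p**2
    assert lhs == (2 * n * p) % p**2, (p, n, lhs)
print("identity holds on the sampled (p, n) pairs")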
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
    if n == 1 or not isinstance(__a ,int ):
return 0
elif n == 2:
return 1
else:
_a : Any = [0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
_a : Any = 0
_a : Dict = 2
while digits < n:
index += 1
_a : Dict = len(str(fibonacci(__a ) ) )
return index
def __UpperCAmelCase ( __a : int = 1_000 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(__a )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 15 | 1 |
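# --- Illustrative sketch ---
# fibonacci_digits_index above recomputes the whole sequence for every
# candidate index, which is quadratic overall. Binet's formula gives the same
# answer in O(1): F(k) ~ phi**k / sqrt(5), so F(k) reaches n digits once
# k * log10(phi) - log10(5) / 2 >= n - 1.
import math

def first_fib_index_with_n_digits(n: int) -> int:
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))

print(first_fib_index_with_n_digits(3))     # 12, since F(12) = 144
print(first_fib_index_with_n_digits(1000))  # 4782, the Project Euler 25 answer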
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Dict:
_a : int = tempfile.mkdtemp()
_a : List[Any] = BlipImageProcessor()
_a : Optional[Any] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
_a : Tuple = BlipProcessor(_a , _a )
processor.save_pretrained(self.tmpdirname )
def __lowercase ( self , **_a ) -> List[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer
def __lowercase ( self , **_a ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def __lowercase ( self ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a : List[str] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> Tuple:
_a : List[Any] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : Union[str, Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Optional[Any] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Union[str, Any]:
_a : str = self.get_image_processor()
_a : Union[str, Any] = self.get_tokenizer()
_a : Dict = BlipProcessor(tokenizer=_a , image_processor=_a )
_a : Union[str, Any] = self.prepare_image_inputs()
_a : str = image_processor(_a , return_tensors='''np''' )
_a : Tuple = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_image_processor()
_a : Optional[int] = self.get_tokenizer()
_a : Tuple = BlipProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = '''lower newer'''
_a : Dict = processor(text=_a )
_a : str = tokenizer(_a , return_token_type_ids=_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Optional[int] = BlipProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Tuple = self.prepare_image_inputs()
_a : Tuple = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __lowercase ( self ) -> str:
_a : List[str] = self.get_image_processor()
_a : Tuple = self.get_tokenizer()
_a : List[Any] = BlipProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : str = processor.batch_decode(_a )
_a : Optional[Any] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : str = self.get_image_processor()
_a : Union[str, Any] = self.get_tokenizer()
_a : str = BlipProcessor(tokenizer=_a , image_processor=_a )
_a : str = '''lower newer'''
_a : Tuple = self.prepare_image_inputs()
_a : str = processor(text=_a , images=_a )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 15 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a__ = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
a__ = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
a__ = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __UpperCAmelCase ( __a : int ,__a : List[str] ) -> Optional[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ,__a : List[str]="binary" ) -> Optional[int]:
"""simple docstring"""
_a : List[str] = simple_accuracy(__a ,__a )
_a : Any = float(fa_score(y_true=__a ,y_pred=__a ,average=__a ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ) -> List[Any]:
"""simple docstring"""
_a : Union[str, Any] = {}
for id_pred, label in zip(__a ,__a ):
_a : Optional[int] = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
_a : Optional[Any] = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_a : str = [(pred, label)]
_a , _a : Any = [], []
for question, preds_labels in question_map.items():
_a , _a : Any = zip(*__a )
_a : List[Any] = fa_score(y_true=__a ,y_pred=__a ,average='''macro''' )
fas.append(__a )
_a : List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(__a ) )
ems.append(__a )
_a : List[str] = float(sum(__a ) / len(__a ) )
_a : str = sum(__a ) / len(__a )
_a : Optional[int] = float(fa_score(y_true=__a ,y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def __lowercase ( self ) -> Any:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def __lowercase ( self , _a , _a ) -> Optional[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_a , _a )}
elif self.config_name == "cb":
return acc_and_fa(_a , _a , fa_avg='''macro''' )
elif self.config_name == "record":
_a : Any = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
_a : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(_a , _a )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_a , _a )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_a , _a )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 15 | 1 |
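# --- Illustrative sketch ---
# The MultiRC scoring above, reduced to its core: group answer-level
# predictions by question, compute a macro-F1 per question and average them
# (f1_m), plus a per-question exact-match flag. Toy data, no real dataset ids.
from sklearn.metrics import f1_score

groups = {  # question id -> list of (prediction, gold_label) pairs
    "q0": [(1, 1), (0, 0), (1, 0)],
    "q1": [(1, 1), (0, 0)],
}
f1s, ems = [], []
for pairs in groups.values():
    preds, golds = zip(*pairs)
    f1s.append(f1_score(y_true=golds, y_pred=preds, average="macro"))
    ems.append(int(all(p == g for p, g in pairs)))
print({"exact_match": sum(ems) / len(ems), "f1_m": sum(f1s) / len(f1s)})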
a__ = {str(digit): digit**5 for digit in range(10)}
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(__a ) )
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
return sum(
number
for number in range(1_000 ,1_000_000 )
if number == digits_fifth_powers_sum(__a ) )
if __name__ == "__main__":
print(solution())
| 15 |
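# --- Illustrative note ---
# Why the search above can stop at 1_000_000: a d-digit number is at least
# 10**(d - 1), while the largest possible digit-fifth-power sum is d * 9**5.
# For d = 7 that maximum is 413_343 < 10**6, so no solution has 7+ digits.
for d in range(1, 8):
    print(d, 10 ** (d - 1) <= d * 9**5)  # True up to d = 6, False from d = 7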
import numpy as np
def __UpperCAmelCase ( __a : np.ndarray ,__a : np.ndarray ,__a : float = 1E-12 ,__a : int = 100 ,) -> tuple[float, np.ndarray]:
"""simple docstring"""
assert np.shape(__a )[0] == np.shape(__a )[1]
# Ensure proper dimensionality.
assert np.shape(__a )[0] == np.shape(__a )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(__a ) == np.iscomplexobj(__a )
_a : List[str] = np.iscomplexobj(__a )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(__a ,input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_a : List[str] = False
_a : List[str] = 0
_a : Tuple = 0
_a : str = 1E12
while not convergence:
# Multiple matrix by the vector.
_a : str = np.dot(__a ,__a )
# Normalize the resulting output vector.
_a : List[Any] = w / np.linalg.norm(__a )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_a : Dict = vector.conj().T if is_complex else vector.T
_a : Tuple = np.dot(__a ,np.dot(__a ,__a ) )
# Check convergence.
_a : List[str] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_a : Dict = True
_a : str = lambda_
if is_complex:
_a : Tuple = np.real(lambda_ )
return lambda_, vector
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_a : List[str] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_a : int = np.array([41, 4, 20] )
_a : Optional[Any] = real_input_matrix.astype(np.complexaaa )
_a : int = np.triu(1j * complex_input_matrix ,1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_a : Union[str, Any] = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_a : Optional[int] = real_input_matrix
_a : Union[str, Any] = real_vector
elif problem_type == "complex":
_a : str = complex_input_matrix
_a : str = complex_vector
# Our implementation.
_a , _a : Optional[Any] = power_iteration(__a ,__a )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh is used for symmetric or Hermitian matrices).
_a , _a : List[str] = np.linalg.eigh(__a )
# Last eigenvalue is the maximum one.
_a : Tuple = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_a : List[Any] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take absolute values element-wise of each eigenvector,
        # as they are only unique up to a minus sign.
assert np.linalg.norm(np.abs(__a ) - np.abs(__a ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 15 | 1 |
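# --- Illustrative sketch ---
# Power iteration in its smallest form: repeatedly apply the matrix and
# renormalize; the Rayleigh quotient of the iterate converges to the dominant
# eigenvalue. A fixed iteration count stands in for the error check above.
import numpy as np

def tiny_power_iteration(a: np.ndarray, iters: int = 200) -> float:
    v = np.ones(a.shape[0])
    for _ in range(iters):
        v = a @ v
        v /= np.linalg.norm(v)
    return float(v @ a @ v)  # Rayleigh quotient of a unit vector

a = np.array([[2.0, 1.0], [1.0, 3.0]])
print(tiny_power_iteration(a))   # ~3.618, the dominant eigenvalue
print(np.linalg.eigh(a)[0][-1])  # same value straight from numpy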
from __future__ import annotations
import math
from collections.abc import Callable
def __UpperCAmelCase ( __a : Callable[[int | float], int | float] ,__a : int | float ,__a : int | float ,__a : int = 100 ,) -> float:
"""simple docstring"""
_a : Tuple = x_start
_a : List[str] = fnc(__a )
_a : Optional[int] = 0.0
for _ in range(__a ):
# Approximates curve as a sequence of linear lines and sums their length
_a : Dict = (x_end - x_start) / steps + xa
_a : Dict = fnc(__a )
length += math.hypot(xa - xa ,fxa - fxa )
# Increment step
_a : Optional[Any] = xa
_a : Any = fxa
return length
if __name__ == "__main__":
def __UpperCAmelCase ( __a : Any ) -> Dict:
"""simple docstring"""
return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
a__ = 10
while i <= 100000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 15 |
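# --- Illustrative check ---
# line_length above approximates the arc length integral of sqrt(1 + f'(x)**2)
# by summing hypot(dx, df) over small segments. Two cases with known answers
# (the local copy below repeats the same logic so the check is self-contained):
import math

def line_length(fnc, x_start, x_end, steps=100):
    xa, fxa, length = x_start, fnc(x_start), 0.0
    for _ in range(steps):
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        length += math.hypot(xb - xa, fxb - fxa)
        xa, fxa = xb, fxb
    return length

print(line_length(lambda x: x, 0, 1))            # ~1.41421, exact: sqrt(2)
print(line_length(lambda x: x * x, 0, 1, 1000))  # ~1.47894, exact: ~1.478943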
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase__ : Optional[datasets.Features] = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCAmelCase__ : Any = PandasConfig
def __lowercase ( self ) -> Any:
return datasets.DatasetInfo(features=self.config.features )
def __lowercase ( self , _a ) -> List[Any]:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_a : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
_a : Dict = data_files
if isinstance(_a , _a ):
_a : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : int = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
_a : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(_a , _a ):
_a : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : Any = [dl_manager.iter_files(_a ) for file in files]
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) )
return splits
def __lowercase ( self , _a ) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_a : Optional[Any] = table_cast(_a , self.config.features.arrow_schema )
return pa_table
def __lowercase ( self , _a ) -> List[str]:
for i, file in enumerate(itertools.chain.from_iterable(_a ) ):
with open(_a , '''rb''' ) as f:
_a : str = pa.Table.from_pandas(pd.read_pickle(_a ) )
yield i, self._cast_table(_a )
| 15 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "yolos"
def __init__( self , _a=7_6_8 , _a=1_2 , _a=1_2 , _a=3_0_7_2 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-1_2 , _a=[5_1_2, 8_6_4] , _a=1_6 , _a=3 , _a=True , _a=1_0_0 , _a=True , _a=False , _a=1 , _a=5 , _a=2 , _a=5 , _a=2 , _a=0.1 , **_a , ) -> Optional[Any]:
super().__init__(**_a )
_a : Dict = hidden_size
_a : int = num_hidden_layers
_a : Optional[Any] = num_attention_heads
_a : Any = intermediate_size
_a : Dict = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Tuple = attention_probs_dropout_prob
_a : Optional[int] = initializer_range
_a : Dict = layer_norm_eps
_a : List[Any] = image_size
_a : Dict = patch_size
_a : Any = num_channels
_a : Tuple = qkv_bias
_a : Optional[int] = num_detection_tokens
_a : int = use_mid_position_embeddings
_a : Dict = auxiliary_loss
# Hungarian matcher
_a : List[str] = class_cost
_a : Optional[int] = bbox_cost
_a : List[Any] = giou_cost
# Loss coefficients
_a : Dict = bbox_loss_coefficient
_a : List[str] = giou_loss_coefficient
_a : List[str] = eos_coefficient
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = version.parse("1.11" )
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowercase ( self ) -> float:
return 1e-4
@property
def __lowercase ( self ) -> int:
return 1_2
| 15 |
def __UpperCAmelCase ( __a : int ,__a : int ,__a : int ) -> int:
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
_a : List[Any] = _modexpt(__a ,exponent // 2 ,__a ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__a ,exponent - 1 ,__a )) % modulo_value
def __UpperCAmelCase ( __a : int = 1_777 ,__a : int = 1_855 ,__a : int = 8 ) -> int:
"""simple docstring"""
_a : List[Any] = base
for _ in range(1 ,__a ):
_a : Any = _modexpt(__a ,__a ,10**digits )
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 15 | 1 |
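# --- Illustrative check ---
# _modexpt above is exponentiation by squaring; Python's built-in
# three-argument pow computes the same thing, which makes a handy cross-check.
# (The base case returns `base` unreduced, mirroring the original, hence the
# extra `% m` in the comparison.)
import random

def modexpt(base: int, exponent: int, modulo: int) -> int:  # same logic as above
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        half = modexpt(base, exponent // 2, modulo) % modulo
        return (half * half) % modulo
    return (base * modexpt(base, exponent - 1, modulo)) % modulo

for _ in range(100):
    b, e, m = random.randint(2, 10**6), random.randint(1, 500), random.randint(2, 10**6)
    assert modexpt(b, e, m) % m == pow(b, e, m)
print("matches pow(base, exponent, mod) on 100 random samples")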
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15 |
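# --- Illustrative sketch ---
# The _LazyModule pattern above defers heavy imports until an attribute is
# first accessed. The same idea in its simplest form is PEP 562's module-level
# __getattr__; the submodule and class names here are made up, so this is a
# template for an __init__.py rather than code runnable against a real package.
import importlib

_LAZY = {"HeavyModel": ".modeling_heavy", "HeavyTokenizer": ".tokenization_heavy"}

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")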
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a__ = '''\
'''
a__ = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
a__ = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def __lowercase ( self , _a , _a , _a = 1_6 , _a = True , _a=None ) -> List[Any]:
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
_a : List[str] = '''cuda'''
else:
_a : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
_a : Dict = AutoModelForCausalLM.from_pretrained(_a )
_a : List[Any] = model.to(_a )
_a : List[str] = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_a : str = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_a : List[Any] = model.config.max_length - 1
else:
_a : List[str] = model.config.max_length
_a : Union[str, Any] = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
_a : List[Any] = encodings['''input_ids''']
_a : int = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_a : Optional[int] = []
_a : Dict = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
_a : Dict = min(start_index + batch_size , len(_a ) )
_a : Union[str, Any] = encoded_texts[start_index:end_index]
_a : int = attn_masks[start_index:end_index]
if add_start_token:
_a : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
_a : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_a : Dict = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 )
_a : Dict = encoded_batch
with torch.no_grad():
_a : Any = model(_a , attention_mask=_a ).logits
_a : List[str] = out_logits[..., :-1, :].contiguous()
_a : Union[str, Any] = labels[..., 1:].contiguous()
_a : Optional[int] = attn_mask[..., 1:].contiguous()
_a : Union[str, Any] = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 15 | 1 |
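# --- Illustrative sketch ---
# Stripped of batching, padding and masking, the computation above is just
# perplexity = exp(mean negative log-likelihood of the next token). A numpy
# version over toy next-token probabilities:
import numpy as np

def perplexity(next_token_probs) -> float:
    nll = -np.log(np.asarray(next_token_probs))
    return float(np.exp(nll.mean()))

print(perplexity([0.25, 0.25, 0.25, 0.25]))  # 4.0 -- uniform over 4 tokens
print(perplexity([1.0, 1.0, 1.0]))           # 1.0 -- a perfectly confident model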
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this with taking a full-size model and reducing its layers and
# emb dimensions to the minimum, while keeping the full vocab + merges files, which leads to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
a__ = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
a__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
a__ = dict(zip(vocab, range(len(vocab))))
a__ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = Path(tmpdirname)
a__ = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
a__ = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
a__ = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
a__ = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
a__ = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
a__ = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
a__ = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
a__ = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 15 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15 | 1 |
def __UpperCAmelCase ( __a : List[Any] ) -> Tuple:
"""simple docstring"""
_a : str = []
_a : Union[str, Any] = set({'''(''', '''[''', '''{'''} )
_a : List[Any] = set({''')''', ''']''', '''}'''} )
_a : str = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
for i in range(len(__a ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(__a ) == 0 or (len(__a ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(__a ) == 0
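# Hedged sanity checks for the matcher above (assuming it is exposed as
# `is_balanced`, as `main` below suggests):
#   is_balanced("{[()]}") -> True   every opener closes in LIFO order
#   is_balanced("{[(])}") -> False  ']' arrives while '(' is on top of the stack
#   is_balanced(")")      -> False  a closer with an empty stack fails immediately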
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_a : Any = input('''Enter sequence of brackets: ''' )
if is_balanced(__a ):
print(__a ,'''is balanced''' )
else:
print(__a ,'''is not balanced''' )
if __name__ == "__main__":
main()
| 15 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
a__ = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
a__ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
a__ = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
a__ = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
a__ = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
a__ = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
a__ = ''''''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]:
"""simple docstring"""
assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __UpperCAmelCase ( __a : List[str] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ):
_a : List[Any] = ReadMe.from_string(__a ,__a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> Tuple:
"""simple docstring"""
with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(__a ,__a )
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
ReadMe.from_string(__a ,__a ,suppress_parsing_errors=__a )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Any ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Tuple = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : Optional[Any] = ReadMe.from_readme(__a ,__a ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __UpperCAmelCase ( __a : List[Any] ,__a : List[Any] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : int = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : Optional[int] = expected_error.format(path=__a )
with pytest.raises(__a ,match=re.escape(__a ) ):
_a : Any = ReadMe.from_readme(__a ,__a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Optional[Any] = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : str = expected_error.format(path=__a )
with pytest.raises(__a ,match=re.escape(__a ) ):
ReadMe.from_readme(__a ,__a )
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : int = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
ReadMe.from_readme(__a ,__a ,suppress_parsing_errors=__a )
| 15 | 1 |
from typing import Dict
from .base import GenericTensor, Pipeline
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self , _a=None , _a=None , _a=None , **_a ) -> str:
if tokenize_kwargs is None:
_a : Any = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
_a : Dict = truncation
_a : List[Any] = tokenize_kwargs
_a : str = {}
if return_tensors is not None:
_a : Union[str, Any] = return_tensors
return preprocess_params, {}, postprocess_params
def __lowercase ( self , _a , **_a ) -> Dict[str, GenericTensor]:
_a : Optional[Any] = self.framework
_a : Any = self.tokenizer(_a , return_tensors=_a , **_a )
return model_inputs
def __lowercase ( self , _a ) -> Optional[int]:
_a : Any = self.model(**_a )
return model_outputs
def __lowercase ( self , _a , _a=False ) -> int:
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *_a , **_a ) -> Tuple:
return super().__call__(*_a , **_a )
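# Hedged usage sketch (model name illustrative): this class backs the
# "feature-extraction" pipeline task, which returns hidden states rather than
# task-specific predictions.
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="bert-base-uncased")
#   features = extractor("Hello world")  # nested lists: [batch][tokens][hidden_size]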
| 15 |
from __future__ import annotations
def __UpperCAmelCase ( __a : list ) -> float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(__a ) / len(__a )
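# Worked example: mean([3, 6, 9]) == (3 + 6 + 9) / 3 == 6.0; an empty list
# raises ValueError instead of dividing by zero.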
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 1 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
a__ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=3 , _a=1_8 , _a=3_0 , _a=4_0_0 , _a=None , _a=True , _a=True , _a=None , ) -> Union[str, Any]:
_a : Tuple = size if size is not None else {'''height''': 2_0, '''width''': 2_0}
_a : Optional[Any] = parent
_a : Optional[int] = batch_size
_a : List[Any] = num_channels
_a : List[Any] = image_size
_a : int = min_resolution
_a : Tuple = max_resolution
_a : Tuple = size
_a : Tuple = do_normalize
_a : Dict = do_convert_rgb
_a : List[str] = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
_a : Any = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
def __lowercase ( self ) -> List[Any]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __lowercase ( self ) -> Optional[Any]:
_a : List[Any] = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
_a : int = Image.open(requests.get(_a , stream=_a ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = PixaStructImageProcessor if is_vision_available() else None
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[int] = PixaStructImageProcessingTester(self )
@property
def __lowercase ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self ) -> Tuple:
_a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __lowercase ( self ) -> Union[str, Any]:
_a : List[str] = self.image_processor_tester.prepare_dummy_image()
_a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
_a : Any = 2_0_4_8
_a : Dict = image_processor(_a , return_tensors='''pt''' , max_patches=_a )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __lowercase ( self ) -> str:
# Initialize image_processor
_a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_a : Optional[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
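        # Hedged note: the "+ 2" above reflects the two positional features
        # (row and column index) that Pix2Struct prepends to every flattened patch.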
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a : str = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a : Union[str, Any] = image_processor(
_a , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize image_processor
_a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_a : str = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
_a : str = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_a ):
_a : Dict = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
_a : Any = '''Hello'''
_a : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_a , header_text=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a : int = image_processor(
_a , return_tensors='''pt''' , max_patches=_a , header_text=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase ( self ) -> Tuple:
# Initialize image_processor
_a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
_a : str = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a : List[Any] = image_processor(
_a , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase ( self ) -> Tuple:
# Initialize image_processor
_a : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_a : List[str] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a : Any = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a : Any = image_processor(
_a , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = PixaStructImageProcessor if is_vision_available() else None
def __lowercase ( self ) -> Optional[Any]:
_a : Any = PixaStructImageProcessingTester(self , num_channels=4 )
_a : Tuple = 3
@property
def __lowercase ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self ) -> Any:
_a : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __lowercase ( self ) -> Dict:
# Initialize image_processor
_a : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_a : List[str] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a : Dict = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a : str = image_processor(
_a , return_tensors='''pt''' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 15 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
a__ = ['''small''', '''medium''', '''large''']
a__ = '''lm_head.decoder.weight'''
a__ = '''lm_head.weight'''
def __UpperCAmelCase ( __a : str ,__a : str ) -> List[str]:
"""simple docstring"""
_a : Any = torch.load(__a )
_a : List[str] = d.pop(__a )
os.makedirs(__a ,exist_ok=__a )
torch.save(__a ,os.path.join(__a ,__a ) )
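# Hedged sketch of the key rename this conversion performs (OLD_KEY / NEW_KEY
# are the module-level constants above):
#   d = torch.load(checkpoint_path)
#   d["lm_head.weight"] = d.pop("lm_head.decoder.weight")
#   torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))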
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
a__ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
a__ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
a__ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 15 | 1 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
a__ = logging.get_logger(__name__)
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = UNetaDModel
UpperCAmelCase__ : Optional[Any] = "sample"
@property
def __lowercase ( self ) -> str:
_a : Dict = 4
_a : Tuple = 3
_a : List[Any] = (3_2, 3_2)
_a : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_a )
_a : Tuple = torch.tensor([1_0] ).to(_a )
return {"sample": noise, "timestep": time_step}
@property
def __lowercase ( self ) -> Any:
return (3, 3_2, 3_2)
@property
def __lowercase ( self ) -> Union[str, Any]:
return (3, 3_2, 3_2)
def __lowercase ( self ) -> List[Any]:
_a : List[Any] = {
'''block_out_channels''': (3_2, 6_4),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 3_2,
}
_a : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = UNetaDModel
UpperCAmelCase__ : Any = "sample"
@property
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = 4
_a : str = 4
_a : Dict = (3_2, 3_2)
_a : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_a )
_a : int = torch.tensor([1_0] ).to(_a )
return {"sample": noise, "timestep": time_step}
@property
def __lowercase ( self ) -> List[str]:
return (4, 3_2, 3_2)
@property
def __lowercase ( self ) -> Optional[int]:
return (4, 3_2, 3_2)
def __lowercase ( self ) -> List[Any]:
_a : Any = {
'''sample_size''': 3_2,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (3_2, 6_4),
'''attention_head_dim''': 3_2,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
_a : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __lowercase ( self ) -> Any:
_a , _a : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_a )
_a : List[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def __lowercase ( self ) -> List[Any]:
_a , _a : List[Any] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_a )
model.to(_a )
_a : Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def __lowercase ( self ) -> Optional[Any]:
    # by default, model loading will use accelerate as `low_cpu_mem_usage=True`
_a , _a : str = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_a )
model_accelerate.to(_a )
model_accelerate.eval()
_a : List[str] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_a : Optional[Any] = noise.to(_a )
_a : str = torch.tensor([1_0] * noise.shape[0] ).to(_a )
_a : Union[str, Any] = model_accelerate(_a , _a )['''sample''']
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_a , _a : Any = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=_a , low_cpu_mem_usage=_a )
model_normal_load.to(_a )
model_normal_load.eval()
_a : str = model_normal_load(_a , _a )['''sample''']
assert torch_all_close(_a , _a , rtol=1e-3 )
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(_a )
_a : List[Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_a : Dict = noise.to(_a )
_a : List[str] = torch.tensor([1_0] * noise.shape[0] ).to(_a )
with torch.no_grad():
_a : int = model(_a , _a ).sample
_a : Any = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_a : Optional[int] = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(_a , _a , rtol=1e-3 ) )
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = UNetaDModel
UpperCAmelCase__ : int = "sample"
@property
def __lowercase ( self , _a=(3_2, 3_2) ) -> Optional[Any]:
_a : Optional[int] = 4
_a : str = 3
_a : int = floats_tensor((batch_size, num_channels) + sizes ).to(_a )
_a : Optional[Any] = torch.tensor(batch_size * [1_0] ).to(dtype=torch.intaa , device=_a )
return {"sample": noise, "timestep": time_step}
@property
def __lowercase ( self ) -> Any:
return (3, 3_2, 3_2)
@property
def __lowercase ( self ) -> List[str]:
return (3, 3_2, 3_2)
def __lowercase ( self ) -> List[Any]:
_a : List[Any] = {
'''block_out_channels''': [3_2, 6_4, 6_4, 6_4],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1e-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
_a : int = self.dummy_input
return init_dict, inputs_dict
@slow
def __lowercase ( self ) -> List[Any]:
_a , _a : Union[str, Any] = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_a )
_a : str = self.dummy_input
_a : Any = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(_a )
_a : List[Any] = noise
_a : str = model(**_a )
assert image is not None, "Make sure output is not None"
@slow
def __lowercase ( self ) -> str:
_a : Any = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(_a )
_a : str = 4
_a : str = 3
_a : Dict = (2_5_6, 2_5_6)
_a : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_a )
_a : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_a )
with torch.no_grad():
_a : Union[str, Any] = model(_a , _a ).sample
_a : List[str] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_a : str = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(_a , _a , rtol=1e-2 ) )
def __lowercase ( self ) -> Dict:
_a : List[str] = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(_a )
_a : str = 4
_a : List[Any] = 3
_a : Any = (3_2, 3_2)
_a : Dict = torch.ones((batch_size, num_channels) + sizes ).to(_a )
_a : int = torch.tensor(batch_size * [1e-4] ).to(_a )
with torch.no_grad():
_a : List[Any] = model(_a , _a ).sample
_a : str = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_a : Any = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(_a , _a , rtol=1e-2 ) )
def __lowercase ( self ) -> Dict:
# not required for this model
pass
| 15 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ ( enum.Enum ):
"""simple docstring"""
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Optional[Any] = 2
@add_end_docstrings(__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self , *_a , **_a ) -> List[str]:
super().__init__(*_a , **_a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_a : Dict = None
if self.model.config.prefix is not None:
_a : List[Any] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_a : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params )
_a : Optional[Any] = {**self._preprocess_params, **preprocess_params}
_a : List[Any] = {**self._forward_params, **forward_params}
def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]:
_a : List[Any] = {}
if prefix is not None:
_a : Optional[Any] = prefix
if prefix:
_a : Dict = self.tokenizer(
_a , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Tuple = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_a : Dict = handle_long_generation
preprocess_params.update(_a )
_a : Tuple = generate_kwargs
_a : Any = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_a : Any = ReturnType.TENSORS
if return_type is not None:
_a : Any = return_type
if clean_up_tokenization_spaces is not None:
_a : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a )
if len(_a ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_a : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowercase ( self , *_a , **_a ) -> Union[str, Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self , _a , **_a ) -> List[str]:
return super().__call__(_a , **_a )
def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]:
_a : Optional[int] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
_a : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a : int = generate_kwargs['''max_new_tokens''']
else:
_a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
_a : List[Any] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def __lowercase ( self , _a , **_a ) -> Optional[int]:
_a : Any = model_inputs['''input_ids''']
_a : Optional[Any] = model_inputs.get('''attention_mask''' , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
_a : int = None
_a : int = None
_a : List[str] = 1
else:
_a : List[Any] = input_ids.shape[0]
_a : Union[str, Any] = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_a : int = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_a : Tuple = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_a : Dict = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a )
_a : int = generated_sequence.shape[0]
if self.framework == "pt":
_a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int:
_a : Tuple = model_outputs['''generated_sequence'''][0]
_a : int = model_outputs['''input_ids''']
_a : Any = model_outputs['''prompt_text''']
_a : Any = generated_sequence.numpy().tolist()
_a : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_a : Optional[int] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_a : str = self.tokenizer.decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
                # Remove the PADDING prompt from the sequence if an XLNet or Transfo-XL model is used
if input_ids is None:
_a : Union[str, Any] = 0
else:
_a : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) )
if return_type == ReturnType.FULL_TEXT:
_a : str = prompt_text + text[prompt_length:]
else:
_a : List[str] = text[prompt_length:]
_a : Union[str, Any] = {'''generated_text''': all_text}
records.append(_a )
return records
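# Hedged usage sketch (model name illustrative): this class backs the
# "text-generation" pipeline task.
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I am", max_new_tokens=20)
#   # -> [{"generated_text": "Hello, I am ..."}]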
| 15 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
UpperCAmelCase__ : int
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def __lowercase ( self ) -> List[str]:
_a : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _a ) -> Any:
_a , _a , _a , _a : Dict = hidden_states.shape
_a : int = jax.image.resize(
_a , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
_a : Optional[int] = self.conv(_a )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
UpperCAmelCase__ : int
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def __lowercase ( self ) -> Optional[Any]:
_a : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _a ) -> str:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_a : str = self.conv(_a )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
UpperCAmelCase__ : int
UpperCAmelCase__ : int = None
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : bool = None
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def __lowercase ( self ) -> str:
_a : Optional[int] = self.in_channels if self.out_channels is None else self.out_channels
_a : Tuple = nn.GroupNorm(num_groups=3_2 , epsilon=1e-5 )
_a : Dict = nn.Conv(
_a , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_a : List[str] = nn.Dense(_a , dtype=self.dtype )
_a : Any = nn.GroupNorm(num_groups=3_2 , epsilon=1e-5 )
_a : Tuple = nn.Dropout(self.dropout_prob )
_a : List[Any] = nn.Conv(
_a , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_a : Optional[Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_a : Union[str, Any] = None
if use_nin_shortcut:
_a : List[str] = nn.Conv(
_a , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__( self , _a , _a , _a=True ) -> Dict:
_a : List[Any] = hidden_states
_a : Tuple = self.norma(_a )
_a : Dict = nn.swish(_a )
_a : str = self.conva(_a )
_a : List[Any] = self.time_emb_proj(nn.swish(_a ) )
_a : Optional[int] = jnp.expand_dims(jnp.expand_dims(_a , 1 ) , 1 )
_a : Optional[int] = hidden_states + temb
_a : List[str] = self.norma(_a )
_a : Tuple = nn.swish(_a )
_a : Optional[int] = self.dropout(_a , _a )
_a : Tuple = self.conva(_a )
if self.conv_shortcut is not None:
_a : Any = self.conv_shortcut(_a )
return hidden_states + residual
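# Hedged shape sketch (NHWC, Flax convention; the class names here are assumed
# to map to FlaxUpsample2D / FlaxDownsample2D / FlaxResnetBlock2D):
#
#   import jax, jax.numpy as jnp
#   x = jnp.ones((1, 16, 16, 8))                        # (batch, H, W, C)
#   up = FlaxUpsample2D(out_channels=8)
#   y = up.apply(up.init(jax.random.PRNGKey(0), x), x)  # -> (1, 32, 32, 8)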
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __a : Dict=None ) -> str:
"""simple docstring"""
if subparsers is not None:
_a : Union[str, Any] = subparsers.add_parser('''test''' )
else:
_a : List[str] = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' ,default=__a ,help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) ,)
if subparsers is not None:
parser.set_defaults(func=__a )
return parser
def __UpperCAmelCase ( __a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Dict = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
_a : List[Any] = script_name
else:
_a : Union[str, Any] = F"""--config_file={args.config_file} {script_name}"""
_a : str = ['''accelerate-launch'''] + test_args.split()
_a : str = execute_subprocess_async(__a ,env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = test_command_parser()
_a : List[Any] = parser.parse_args()
test_command(__a )
if __name__ == "__main__":
main()
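# Hedged CLI sketch: this module backs `accelerate test`, which launches the
# bundled test_script.py through `accelerate-launch`, e.g.
#
#   accelerate test
#   accelerate test --config_file path/to/default_config.yaml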
| 15 | 1 |
def __UpperCAmelCase ( __a : list ) -> list:
"""simple docstring"""
if len(__a ) <= 1:
return lst
_a : Optional[Any] = 1
while i < len(__a ):
if lst[i - 1] <= lst[i]:
i += 1
else:
_a , _a : Optional[Any] = lst[i], lst[i - 1]
i -= 1
if i == 0:
_a : Dict = 1
return lst
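# Worked trace of the single-pointer walk above on [3, 1, 2]:
#   i=1: 3 > 1, swap -> [1, 3, 2], i -> 0 -> reset to 1
#   i=1: 1 <= 3, i -> 2
#   i=2: 3 > 2, swap -> [1, 2, 3], i -> 1
#   i=1: 1 <= 2, i -> 2; i=2: 2 <= 3, i -> 3; done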
if __name__ == "__main__":
a__ = input('''Enter numbers separated by a comma:\n''').strip()
a__ = [int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted))
| 15 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : Any = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_a : str = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __lowercase ( self , **_a ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> str:
_a : List[str] = self.get_tokenizer()
_a : Tuple = self.get_image_processor()
_a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Dict:
_a : List[str] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Dict = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = self.prepare_image_inputs()
_a : List[Any] = image_processor(_a , return_tensors='''np''' )
_a : Dict = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Tuple = '''lower newer'''
_a : int = processor(text=_a )
_a : str = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> List[Any]:
_a : Any = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Union[str, Any] = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : int = processor.batch_decode(_a )
_a : int = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Tuple = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Optional[int] = '''lower newer'''
_a : Dict = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 15 | 1 |
def __UpperCAmelCase ( __a : int ,__a : float ,__a : float ) -> float:
"""simple docstring"""
return round(float(moles / volume ) * nfactor )
def __UpperCAmelCase ( __a : float ,__a : float ,__a : float ) -> float:
"""simple docstring"""
return round(float((moles * 0.08_21 * temperature) / (volume) ) )
def __UpperCAmelCase ( __a : float ,__a : float ,__a : float ) -> float:
"""simple docstring"""
return round(float((moles * 0.08_21 * temperature) / (pressure) ) )
def __UpperCAmelCase ( __a : float ,__a : float ,__a : float ) -> float:
"""simple docstring"""
return round(float((pressure * volume) / (0.08_21 * moles) ) )
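# Hedged note: the helpers above appear to implement molarity-to-normality
# (moles / volume * n-factor) and rearrangements of the ideal gas law
# PV = nRT with R ~ 0.0821 L*atm/(mol*K). Worked example with illustrative
# numbers: 1 mol at 300 K in 10 L gives P = nRT / V = (1 * 0.0821 * 300) / 10
# ~ 2.46 atm, which round() reports as 2.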
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
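# Hedged note: the "*" in the mapped keys above is a layer-index placeholder;
# the loader below substitutes the fairseq layer number at load time, e.g.
#   "encoder.layers.*.attention.k_proj" -> "encoder.layers.3.attention.k_proj"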
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
_a : Optional[Any] = getattr(__a ,__a )
if weight_type is not None:
_a : Dict = getattr(__a ,__a ).shape
else:
_a : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : List[Any] = value
elif weight_type == "weight_g":
_a : Any = value
elif weight_type == "weight_v":
_a : Union[str, Any] = value
elif weight_type == "bias":
_a : Optional[int] = value
else:
_a : List[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = []
_a : Union[str, Any] = fairseq_model.state_dict()
_a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a : int = False
if "conv_layers" in name:
load_conv_layer(
__a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,)
_a : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
_a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
_a : Any = True
if "*" in mapped_key:
_a : Optional[int] = name.split(__a )[0].split('''.''' )[-2]
_a : Any = mapped_key.replace('''*''' ,__a )
if "weight_g" in name:
_a : List[Any] = '''weight_g'''
elif "weight_v" in name:
_a : List[str] = '''weight_v'''
elif "weight" in name:
_a : Any = '''weight'''
elif "bias" in name:
_a : str = '''bias'''
else:
_a : Any = None
set_recursively(__a ,__a ,__a ,__a ,__a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple:
"""simple docstring"""
_a : int = full_name.split('''conv_layers.''' )[-1]
_a : Any = name.split('''.''' )
_a : List[Any] = int(items[0] )
_a : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_a : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
_a : Tuple = HubertConfig.from_pretrained(__a )
else:
_a : Any = HubertConfig()
if is_finetuned:
if dict_path:
_a : Tuple = Dictionary.load(__a )
# important: change the bos & pad token ids since the CTC symbol is <pad> and
# not <s> as in fairseq
_a : Any = target_dict.pad_index
_a : Tuple = target_dict.bos_index
_a : Optional[int] = target_dict.eos_index
_a : Optional[Any] = len(target_dict.symbols )
_a : Tuple = os.path.join(__a ,'''vocab.json''' )
if not os.path.isdir(__a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) )
return
os.makedirs(__a ,exist_ok=__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices ,__a )
_a : Tuple = WavaVecaCTCTokenizer(
__a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,)
_a : Tuple = True if config.feat_extract_norm == '''layer''' else False
_a : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,)
_a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a )
processor.save_pretrained(__a )
_a : Tuple = HubertForCTC(__a )
else:
_a : Tuple = HubertModel(__a )
if is_finetuned:
_a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a : Any = model[0].eval()
recursively_load_weights(__a ,__a ,__a )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
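# Sketch of a typical invocation (the script filename and paths are
# illustrative placeholders, not verified against this repository):
# python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path /path/to/hubert_base_ls960.pt \
#     --pytorch_dump_folder_path ./hubert-base-converted \
#     --not_finetuned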
| 15 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : int = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=__a )
_a : Optional[int] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__a )
env_command_parser(subparsers=__a )
launch_command_parser(subparsers=__a )
tpu_command_parser(subparsers=__a )
test_command_parser(subparsers=__a )
# Let's go
_a : Dict = parser.parse_args()
if not hasattr(__a ,'''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__a )
if __name__ == "__main__":
main()
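# Typical shell usage of this entry point (a sketch; each subcommand is wired
# in by one of the parsers registered above):
#   accelerate config                          # interactively write a config file
#   accelerate env                             # print environment info
#   accelerate launch train.py --some-flag     # run a script under the saved config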
| 15 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"]
UpperCAmelCase__ : str = "ViltImageProcessor"
UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a=None , _a=None , **_a ) -> Any:
_a : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
_a : Dict = kwargs.pop('''feature_extractor''' )
_a : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
_a : int = self.image_processor
def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
_a : Tuple = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel_values + pixel_mask
_a : str = self.image_processor(_a , return_tensors=_a )
encoding.update(_a )
return encoding
def __lowercase ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> str:
return self.tokenizer.decode(*_a , **_a )
@property
def __lowercase ( self ) -> Optional[int]:
_a : str = self.tokenizer.model_input_names
_a : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowercase ( self ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __lowercase ( self ) -> Any:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
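# Minimal usage sketch (assuming this mangled class corresponds to the
# upstream ViltProcessor; the checkpoint name is illustrative):
# from transformers import ViltProcessor
# from PIL import Image
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# inputs = processor(images=Image.open("cats.jpg"), text="How many cats?", return_tensors="pt")
# # inputs combines the tokenizer outputs with pixel_values + pixel_mask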
| 15 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
a__ = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **_a ) -> Optional[int]:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_a : List[str] = deprecated_arg[3:]
_a : int = not kwargs.pop(_a )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
_a : Dict = kwargs.pop('''tpu_name''' , self.tpu_name )
_a : Dict = kwargs.pop('''device_idx''' , self.device_idx )
_a : Dict = kwargs.pop('''eager_mode''' , self.eager_mode )
_a : List[str] = kwargs.pop('''use_xla''' , self.use_xla )
super().__init__(**_a )
UpperCAmelCase__ : str = field(
default=__lowercase , metadata={"help": "Name of TPU"} , )
UpperCAmelCase__ : int = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Benchmark models in eager model."} )
UpperCAmelCase__ : bool = field(
default=__lowercase , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
def __lowercase ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['''tf'''] )
_a : int = None
if self.tpu:
try:
if self.tpu_name:
_a : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_a : Tuple = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_a : Optional[int] = None
return tpu
@cached_property
def __lowercase ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_a : Dict = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
_a : Optional[Any] = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
_a : Any = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
return strategy
@property
def __lowercase ( self ) -> bool:
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
def __lowercase ( self ) -> "tf.distribute.Strategy":
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
def __lowercase ( self ) -> Any:
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
def __lowercase ( self ) -> int:
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __lowercase ( self ) -> bool:
return self.n_gpu > 0
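# Construction sketch (assuming this maps to the upstream
# TensorFlowBenchmarkArguments; models/batch_sizes/sequence_lengths come from
# the parent BenchmarkArguments dataclass, the rest from the fields above):
# args = TensorFlowBenchmarkArguments(
#     models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128],
#     eager_mode=True, device_idx=0,
# )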
| 15 |
from math import ceil
def __UpperCAmelCase ( __a : int = 1_001 ) -> int:
"""simple docstring"""
_a : Dict = 1
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
_a : int = 2 * i + 1
_a : str = 2 * i
_a : Any = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
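# Worked example: for a 5x5 number spiral the diagonals hold
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101. The loop above agrees:
# 1 + (4*3**2 - 6*2) + (4*5**2 - 6*4) = 1 + 24 + 76 = 101.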
| 15 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
a__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
a__ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
a__ = '''>>zh<<'''
a__ = '''Helsinki-NLP/'''
if is_torch_available():
a__ = '''pt'''
elif is_tf_available():
a__ = '''tf'''
else:
a__ = '''jax'''
@require_sentencepiece
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = MarianTokenizer
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Dict = True
def __lowercase ( self ) -> List[Any]:
super().setUp()
_a : List[str] = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
_a : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_a : Dict = Path(self.tmpdirname )
save_json(_a , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(_a , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(_a , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(_a , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
_a : List[str] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self , **_a ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Optional[int]:
return (
"This is a test",
"This is a test",
)
def __lowercase ( self ) -> Dict:
_a : Tuple = '''</s>'''
_a : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __lowercase ( self ) -> Tuple:
_a : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_a ) , 9 )
def __lowercase ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""" )
_a : List[str] = en_de_tokenizer(['''I am a small frog'''] , return_tensors=_a )
self.assertIsInstance(_a , _a )
_a : str = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(_a , batch.input_ids[0] )
_a : Tuple = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(_a )
_a : Optional[Any] = [x.name for x in Path(_a ).glob('''*''' )]
self.assertIn('''source.spm''' , _a )
MarianTokenizer.from_pretrained(_a )
def __lowercase ( self ) -> Optional[Any]:
_a : Union[str, Any] = self.get_tokenizer()
_a : List[str] = tok(
['''I am a small frog''' * 1_0_0_0, '''I am a small frog'''] , padding=_a , truncation=_a , return_tensors=_a )
self.assertIsInstance(_a , _a )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def __lowercase ( self ) -> Any:
_a : Dict = self.get_tokenizer()
_a : List[Any] = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=_a , return_tensors=_a )
self.assertIsInstance(_a , _a )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def __lowercase ( self ) -> Dict:
# fmt: off
_a : List[str] = {'''input_ids''': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[int] = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
_a : Any = '''Tämä on testi'''
_a : List[Any] = '''This is a test'''
_a : Tuple = [7_6, 7, 2_0_4_7, 2]
_a : Union[str, Any] = [6_9, 1_2, 1_1, 9_4_0, 2]
_a : str = tokenizer(_a ).input_ids
self.assertListEqual(_a , _a )
_a : str = tokenizer(text_target=_a ).input_ids
self.assertListEqual(_a , _a )
_a : List[str] = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertEqual(_a , _a )
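# Usage sketch outside the test harness (the checkpoint is a real upstream
# model name; the call itself is illustrative):
# from transformers import MarianTokenizer
# tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
# batch = tok(["I am a small frog"], return_tensors="pt")
# # batch.input_ids ends in id 0, the </s> token, as asserted above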
| 15 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
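# Worked example of the normalization above: with width=1000 and height=2000,
# box [10, 20, 110, 220] maps to [10, 10, 110, 110], i.e. every coordinate is
# rescaled into a 0-1000 range relative to its image dimension.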
def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]:
"""simple docstring"""
_a : str = to_pil_image(__a )
_a , _a : Optional[Any] = pil_image.size
_a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a )
_a , _a , _a , _a , _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
_a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()]
_a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices]
_a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_a : int = []
for x, y, w, h in zip(__a ,__a ,__a ,__a ):
_a : List[str] = [x, y, x + w, y + h]
actual_boxes.append(__a )
# finally, normalize the bounding boxes
_a : Dict = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__a ,__a ,__a ) )
assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
| 15 | 1 |
def __UpperCAmelCase ( __a : list[int] ,__a : int ) -> bool:
"""simple docstring"""
_a : List[str] = len(__a )
_a : Optional[int] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# a sum of zero can be formed from any prefix of arr by taking no elements,
# hence True
for i in range(arr_len + 1 ):
_a : Optional[Any] = True
# a non-zero sum cannot be formed from the empty prefix, hence False
for i in range(1 ,required_sum + 1 ):
_a : Dict = False
for i in range(1 ,arr_len + 1 ):
for j in range(1 ,required_sum + 1 ):
if arr[i - 1] > j:
_a : Union[str, Any] = subset[i - 1][j]
if arr[i - 1] <= j:
_a : Tuple = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
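# Worked example for the subset-sum DP above (mangled name __UpperCAmelCase):
# arr=[3, 34, 4, 12, 5, 2], required_sum=9  -> True  (4 + 5 == 9)
# arr=[3, 34, 4, 12, 5, 2], required_sum=30 -> False (no subset sums to 30)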
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : int = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=__a )
_a : Optional[int] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__a )
env_command_parser(subparsers=__a )
launch_command_parser(subparsers=__a )
tpu_command_parser(subparsers=__a )
test_command_parser(subparsers=__a )
# Let's go
_a : Dict = parser.parse_args()
if not hasattr(__a ,'''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__a )
if __name__ == "__main__":
main()
| 15 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a__ = get_tests_dir('''fixtures''')
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
_a : Dict = mock.Mock()
_a : List[Any] = 5_0_0
_a : Optional[Any] = {}
_a : Any = HTTPError
_a : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
_a : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_a ) as mock_head:
_a : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Check that the fake head request was indeed called
mock_head.assert_called()
def __lowercase ( self ) -> Any:
# This test is for deprecated behavior and can be removed in v5
_a : List[str] = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls ) -> Tuple:
_a : Optional[int] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls ) -> int:
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def __lowercase ( self ) -> str:
_a : int = WavaVecaFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
_a : List[Any] = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_a , repo_id='''test-feature-extractor''' , push_to_hub=_a , use_auth_token=self._token )
_a : Dict = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
def __lowercase ( self ) -> List[Any]:
_a : int = WavaVecaFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
_a : List[str] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_a , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=_a , use_auth_token=self._token )
_a : str = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
def __lowercase ( self ) -> Union[str, Any]:
CustomFeatureExtractor.register_for_auto_class()
_a : Union[str, Any] = CustomFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
_a : str = AutoFeatureExtractor.from_pretrained(
F"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=_a )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 15 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__ = random.Random()
def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any:
"""simple docstring"""
if rng is None:
_a : Dict = global_rng
_a : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : str = batch_size
_a : List[str] = min_seq_length
_a : str = max_seq_length
_a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] = spectrogram_length
_a : List[str] = feature_size
_a : List[Any] = num_audio_channels
_a : Tuple = hop_length
_a : Optional[int] = chunk_length
_a : int = sampling_rate
def __lowercase ( self ) -> Union[str, Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = TvltFeatureExtractor
def __lowercase ( self ) -> Dict:
_a : List[str] = TvltFeatureExtractionTester(self )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_a : Dict = self.feature_extraction_class.from_pretrained(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Union[str, Any] = feat_extract_second.to_dict()
_a : Any = dict_first.pop('''mel_filters''' )
_a : int = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
_a : List[str] = self.feature_extraction_class.from_json_file(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Dict = feat_extract_second.to_dict()
_a : str = dict_first.pop('''mel_filters''' )
_a : str = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize feature_extractor
_a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a : Union[str, Any] = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : int = np.asarray(_a )
_a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowercase ( self ) -> int:
_a : Union[str, Any] = self._load_datasamples(1 )
_a : int = TvltFeatureExtractor()
_a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 15 | 1 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
a__ = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def __UpperCAmelCase ( __a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_a : Union[str, Any] = test_results.split(''' ''' )
_a : Any = 0
_a : Any = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_a : Optional[Any] = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(__a ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
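# Worked example for the parser above (mangled here to __UpperCAmelCase):
# on "= 2 failed, 30 passed in 12.34s =" it returns (2, 30, "12.34s"):
# each "failed"/"passed" token pulls in the count that precedes it, and the
# trailing "=" marker makes time_spent the second-to-last token.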
def __UpperCAmelCase ( __a : Tuple ) -> Optional[int]:
"""simple docstring"""
_a : List[str] = {}
_a : Optional[int] = None
_a : int = False
for line in failures_short_lines.split('''\n''' ):
if re.search(R'''_ \[doctest\]''' ,__a ):
_a : List[str] = True
_a : List[Any] = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
_a : int = line
_a : str = False
return failures
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a ) -> str:
_a : Optional[Any] = title
_a : int = doc_test_results['''time_spent'''].split(''',''' )[0]
_a : List[str] = doc_test_results['''success''']
_a : Any = doc_test_results['''failures''']
_a : Any = self.n_success + self.n_failures
# Failures and success of the modeling tests
_a : Union[str, Any] = doc_test_results
@property
def __lowercase ( self ) -> str:
_a : Optional[int] = [self._time_spent]
_a : str = 0
for time in time_spent:
_a : Tuple = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_a ) == 1:
_a : List[Any] = [0, 0, time_parts[0]]
_a , _a , _a : Optional[int] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_6_0_0 + minutes * 6_0 + seconds
_a , _a , _a : int = total_secs // 3_6_0_0, (total_secs % 3_6_0_0) // 6_0, total_secs % 6_0
return F"""{int(_a )}h{int(_a )}m{int(_a )}s"""
@property
def __lowercase ( self ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def __lowercase ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __lowercase ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = 4_0
_a : List[Any] = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_a , _a )}
_a : List[Any] = ''''''
for category, failures in category_failures.items():
if len(_a ) == 0:
continue
if report != "":
report += "\n\n"
report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def __lowercase ( self ) -> str:
_a : Union[str, Any] = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_a )
@staticmethod
def __lowercase ( ) -> Any:
_a : Optional[Any] = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_a )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_a , )
def __lowercase ( self ) -> Dict:
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
_a : Optional[Any] = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else '''All tests passed.'''
_a : int = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=_a , )
def __lowercase ( self , _a , _a , _a , _a ) -> List[Any]:
_a : str = ''''''
for key, value in failures.items():
_a : int = value[:2_0_0] + ''' [Truncated]''' if len(_a ) > 2_5_0 else value
failures_text += F"""*{key}*\n_{value}_\n\n"""
_a : int = job_name
_a : int = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
_a : str = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def __lowercase ( self ) -> Tuple:
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
_a : Union[str, Any] = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
_a : Any = sorted(self.doc_test_results.items() , key=lambda _a : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
_a : List[Any] = F"""*Num failures* :{len(job_result['failed'] )} \n"""
_a : List[str] = job_result['''failures''']
_a : Optional[Any] = self.get_reply_blocks(_a , _a , _a , text=_a )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F"""Results for {job}""" , blocks=_a , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1 )
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
_a : Union[str, Any] = os.environ['''GITHUB_RUN_ID''']
_a : Tuple = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
_a : str = requests.get(__a ).json()
_a : Union[str, Any] = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
_a : Dict = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(__a ):
_a : Optional[Any] = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' ,__a )
return {}
def __UpperCAmelCase ( __a : str ) -> Union[str, Any]:
"""simple docstring"""
_a : List[str] = {}
if os.path.exists(__a ):
_a : Tuple = os.listdir(__a )
for file in files:
try:
with open(os.path.join(__a ,__a ) ,encoding='''utf-8''' ) as f:
_a : List[Any] = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(__a ,__a )}.""" ) from e
return _artifact
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a ) -> Optional[int]:
_a : Dict = name
_a : str = []
def __str__( self ) -> Dict:
return self.name
def __lowercase ( self , _a ) -> List[str]:
self.paths.append({'''name''': self.name, '''path''': path} )
_a : Dict[str, Artifact] = {}
_a : List[Any] = filter(os.path.isdir ,os.listdir() )
for directory in directories:
_a : Any = directory
if artifact_name not in _available_artifacts:
_a : List[str] = Artifact(__a )
_available_artifacts[artifact_name].add_path(__a )
return _available_artifacts
if __name__ == "__main__":
a__ = get_job_links()
a__ = retrieve_available_artifacts()
a__ = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
a__ = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
a__ = github_actions_job_links.get('''run_doctests''')
a__ = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
a__ = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
a__ , a__ , a__ = handle_test_results(artifact['''stats'''])
a__ = failed
a__ = success
a__ = time_spent[1:-1] + ''', '''
a__ = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
a__ = line.replace('''FAILED ''', '''''')
a__ = line.split()[0].replace('''\n''', '''''')
if "::" in line:
a__ , a__ = line.split('''::''')
else:
a__ , a__ = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
a__ = docs[file_regex]
doc_test_results[category]["failed"].append(test)
a__ = all_failures[test] if test in all_failures else '''N/A'''
a__ = failure
break
a__ = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 15 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
@add_end_docstrings(
__lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self , _a ) -> np.ndarray:
if self.framework == "tf":
_a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __lowercase ( self , _a ) -> np.ndarray:
_a : int = self.get_masked_index(_a )
_a : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __lowercase ( self , _a ) -> Optional[int]:
if isinstance(_a , _a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]:
if return_tensors is None:
_a : Union[str, Any] = self.framework
_a : str = self.tokenizer(_a , return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[str] = self.model(**_a )
_a : Any = model_inputs['''input_ids''']
return model_outputs
def __lowercase ( self , _a , _a=5 , _a=None ) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_a : List[Any] = target_ids.shape[0]
_a : Any = model_outputs['''input_ids'''][0]
_a : List[str] = model_outputs['''logits''']
if self.framework == "tf":
_a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_a : List[str] = outputs.numpy()
_a : Dict = outputs[0, masked_index, :]
_a : str = stable_softmax(_a , axis=-1 )
if target_ids is not None:
_a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) )
_a : Union[str, Any] = tf.expand_dims(_a , 0 )
_a : Optional[int] = tf.math.top_k(_a , k=_a )
_a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
_a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_a : List[str] = outputs[0, masked_index, :]
_a : List[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_a : List[Any] = probs[..., target_ids]
_a , _a : Optional[Any] = probs.topk(_a )
_a : Dict = []
_a : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_a : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_a : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
_a : Tuple = target_ids[p].tolist()
_a : List[str] = p
# Filter padding out:
_a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a )
_a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
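# A hedged usage sketch for the pipeline class above: the checkpoint name is
# illustrative and downloading it needs network access, but the output keys
# ("score", "token", "token_str", "sequence") match what postprocess() builds.
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base")
    for candidate in unmasker("Paris is the <mask> of France.", top_k=3):
        print(candidate["token_str"], round(candidate["score"], 4))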
| 15 | 1 |
import math


class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        # adjacency matrix for edge weights; math.inf means "no edge"
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores the minimum distance found so far from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
a__ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))  # 11, via 1 -> 3 -> 4
    print(graph.show_min(0, 3))  # 16, via 0 -> 2 -> 3
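# A sketch of an optional extension (not in the original class): track the
# next hop that last improved each pair, so shortest routes themselves can be
# reconstructed, not just their lengths.
def floyd_warshall_with_paths(dp):
    """Mutates dp into all-pairs distances; returns (dp, nxt) with next hops."""
    n = len(dp)
    nxt = [[j if dp[i][j] != math.inf else None for j in range(n)] for i in range(n)]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dp[i][k] + dp[k][j] < dp[i][j]:
                    dp[i][j] = dp[i][k] + dp[k][j]
                    nxt[i][j] = nxt[i][k]
    return dp, nxt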
| 15 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
 import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    """End-to-end smoke tests for the Flax example scripts."""
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
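# The tests above all rely on one trick: patching `sys.argv` so that an
# example script's own argparse parser sees a synthetic command line. A
# self-contained illustration of the pattern (the parser below is hypothetical):
def _argv_patch_demo() -> float:
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--learning_rate", type=float)
    with patch.object(sys, "argv", ["script.py", "--learning_rate", "1e-4"]):
        args = parser.parse_args()
    return args.learning_rate  # 0.0001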
| 15 | 1 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
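# Hedged usage sketch: with the `datasets` library, a builder like this backs
# `load_dataset("pandas", ...)`, so pickled DataFrames load as a Dataset (the
# file name below is hypothetical):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("pandas", data_files={"train": "my_frame.pkl"})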
| 15 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to main docs with links to stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
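# The substitution mechanism above, in isolation: a compiled MULTILINE pattern
# plus a "VERSION" template rewrite a version string in place.
_pat = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
_new = '__version__ = "VERSION"'.replace("VERSION", "4.30.0")
assert _pat.sub(_new, '__version__ = "4.30.0.dev0"\n') == '__version__ = "4.30.0"\n'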
| 15 | 1 |
def optimal_merge_pattern(files: list) -> float:
    """Returns the optimal (minimum) total cost of merging all the given file sizes."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
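# The index()/pop() scan above is quadratic; the same greedy choice with a
# binary heap gives the classic O(n log n) Huffman-style merge:
import heapq


def optimal_merge_cost_heap(sizes: list) -> int:
    heapq.heapify(sizes)
    total = 0
    while len(sizes) > 1:
        merged = heapq.heappop(sizes) + heapq.heappop(sizes)
        total += merged
        heapq.heappush(sizes, merged)
    return total


assert optimal_merge_cost_heap([2, 3, 4]) == 14  # merge 2+3=5, then 5+4=9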
| 15 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    """Returns the index of the first Fibonacci term to contain n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
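# Since F(i) ~ phi**i / sqrt(5), the digit count grows linearly in i, so the
# first index with n digits also has a closed-form estimate (a sketch; for
# n = 1000 it matches the iterative answer, 4782):
import math


def fibonacci_digits_index_closed_form(n: int) -> int:
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))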
| 15 | 1 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    """Computes F1 score and Exact Match for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 15 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def __UpperCAmelCase ( __a : Tuple ) -> Optional[int]:
"""simple docstring"""
_a : List[Any] = SwinvaConfig()
_a : Optional[Any] = swinva_name.split('''_''' )
_a : Union[str, Any] = name_split[1]
if "to" in name_split[3]:
_a : List[str] = int(name_split[3][-3:] )
else:
_a : Dict = int(name_split[3] )
if "to" in name_split[2]:
_a : List[Any] = int(name_split[2][-2:] )
else:
_a : Optional[int] = int(name_split[2][6:] )
if model_size == "tiny":
_a : Tuple = 96
_a : Optional[Any] = (2, 2, 6, 2)
_a : Any = (3, 6, 12, 24)
elif model_size == "small":
_a : Dict = 96
_a : Dict = (2, 2, 18, 2)
_a : Optional[Any] = (3, 6, 12, 24)
elif model_size == "base":
_a : Dict = 128
_a : str = (2, 2, 18, 2)
_a : Union[str, Any] = (4, 8, 16, 32)
else:
_a : Dict = 192
_a : List[str] = (2, 2, 18, 2)
_a : Any = (6, 12, 24, 48)
if "to" in swinva_name:
_a : Optional[Any] = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
_a : int = 21_841
_a : str = '''huggingface/label-files'''
_a : int = '''imagenet-22k-id2label.json'''
_a : Any = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
_a : Any = {int(__a ): v for k, v in idalabel.items()}
_a : str = idalabel
_a : Any = {v: k for k, v in idalabel.items()}
else:
_a : Optional[Any] = 1_000
_a : str = '''huggingface/label-files'''
_a : Optional[int] = '''imagenet-1k-id2label.json'''
_a : Optional[int] = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
_a : Any = {int(__a ): v for k, v in idalabel.items()}
_a : int = idalabel
_a : Dict = {v: k for k, v in idalabel.items()}
_a : Dict = img_size
_a : Any = num_classes
_a : str = embed_dim
_a : Dict = depths
_a : Optional[Any] = num_heads
_a : Optional[Any] = window_size
return config
def __UpperCAmelCase ( __a : Any ) -> str:
"""simple docstring"""
if "patch_embed.proj" in name:
_a : Dict = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_a : Tuple = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
_a : Union[str, Any] = '''encoder.''' + name
if "attn.proj" in name:
_a : int = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
_a : int = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
_a : Any = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
_a : Optional[Any] = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
_a : Any = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
_a : str = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "q_bias" in name:
_a : Dict = name.replace('''q_bias''' ,'''query.bias''' )
if "k_bias" in name:
_a : List[Any] = name.replace('''k_bias''' ,'''key.bias''' )
if "v_bias" in name:
_a : Dict = name.replace('''v_bias''' ,'''value.bias''' )
if "cpb_mlp" in name:
_a : List[Any] = name.replace('''cpb_mlp''' ,'''continuous_position_bias_mlp''' )
if name == "norm.weight":
_a : str = '''layernorm.weight'''
if name == "norm.bias":
_a : Optional[Any] = '''layernorm.bias'''
if "head" in name:
_a : Any = name.replace('''head''' ,'''classifier''' )
else:
_a : Optional[int] = '''swinv2.''' + name
return name
def __UpperCAmelCase ( __a : Tuple ,__a : Optional[int] ) -> Any:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_a : Any = orig_state_dict.pop(__a )
if "mask" in key:
continue
elif "qkv" in key:
_a : List[Any] = key.split('''.''' )
_a : Union[str, Any] = int(key_split[1] )
_a : Optional[int] = int(key_split[3] )
_a : List[str] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_a : Any = val[:dim, :]
_a : Tuple = val[dim : dim * 2, :]
_a : Optional[Any] = val[-dim:, :]
else:
_a : Optional[int] = val[:dim]
_a : List[str] = val[
dim : dim * 2
]
_a : Any = val[-dim:]
else:
_a : List[str] = val
return orig_state_dict
def __UpperCAmelCase ( __a : List[Any] ,__a : Dict ) -> Any:
"""simple docstring"""
_a : Any = timm.create_model(__a ,pretrained=__a )
timm_model.eval()
_a : Optional[int] = get_swinva_config(__a )
_a : str = SwinvaForImageClassification(__a )
model.eval()
_a : Union[str, Any] = convert_state_dict(timm_model.state_dict() ,__a )
model.load_state_dict(__a )
_a : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : Dict = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' ,'''-''' ) ) )
_a : Tuple = Image.open(requests.get(__a ,stream=__a ).raw )
_a : Dict = image_processor(images=__a ,return_tensors='''pt''' )
_a : Any = timm_model(inputs['''pixel_values'''] )
_a : List[str] = model(**__a ).logits
assert torch.allclose(__a ,__a ,atol=1E-3 )
print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
model.push_to_hub(
repo_path_or_name=Path(__a ,__a ) ,organization='''nandwalritik''' ,commit_message='''Add model''' ,)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
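# The heart of the state-dict conversion above, in isolation: timm checkpoints
# fuse the attention projections into one `qkv` tensor, while transformers
# expects separate query/key/value weights (the `dim` below is illustrative).
if __name__ == "__main__":
    dim = 96
    qkv_weight = torch.randn(3 * dim, dim)  # fused [q; k; v] rows
    query, key, value = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
    assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)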
| 15 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Find the largest-magnitude eigenvalue of input_matrix and a corresponding eigenvector."""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
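# Convergence of power iteration is geometric in |lambda_2 / lambda_1|; a
# quick empirical look at that ratio for the 3x3 test matrix used above:
if __name__ == "__main__":
    _a_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    _eigs = np.sort(np.abs(np.linalg.eigvalsh(_a_matrix)))
    print("per-iteration error contraction ~", _eigs[-2] / _eigs[-1])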
| 15 | 1 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr, word, index) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
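# A common extension (not part of the original class): prefix search for
# autocomplete, reusing the same node walk as find() but without requiring
# a leaf at the end of the walk.
def starts_with(root: TrieNode, prefix: str) -> bool:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return False
        curr = curr.nodes[char]
    return True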
| 15 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (in minutes) from a single job."""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
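# The duration arithmetic used above, in isolation: ISO-8601 timestamps from
# the GitHub API are parsed with dateutil and rounded to whole minutes.
if __name__ == "__main__":
    _start = date_parser.parse("2023-01-01T10:00:00Z")
    _end = date_parser.parse("2023-01-01T10:07:30Z")
    print(round((_end - _start).total_seconds() / 60.0))  # -> 8 (7.5 rounds half-to-even)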
| 15 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Returns base^exponent % modulo_value using exponentiation by squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Returns the last `digits` digits of the hyperexponentiation (tetration) of base by height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
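# Sanity check in plain Python: for small inputs _modexpt agrees with the
# built-in three-argument pow(), and a height-2 tower is just base**base.
assert _modexpt(3, 4, 10**8) == pow(3, 4, 10**8) == 81
assert solution(base=3, height=2, digits=8) == pow(3, 3, 10**8) == 27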
| 15 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> List[Any]:
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_a )
def __lowercase ( cls , _a , **_a ) -> Optional[Any]:
_a : Optional[Any] = kwargs.pop('''config''' , _a )
_a : Optional[Any] = kwargs.pop('''trust_remote_code''' , _a )
_a : Any = True
_a , _a : Any = ImageProcessingMixin.get_image_processor_dict(_a , **_a )
_a : Optional[int] = config_dict.get('''image_processor_type''' , _a )
_a : Union[str, Any] = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
_a : Optional[Any] = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_a : int = config_dict.pop('''feature_extractor_type''' , _a )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
_a : str = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
_a : int = config_dict['''auto_map''']['''AutoFeatureExtractor''']
_a : Optional[int] = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_a , _a ):
_a : List[Any] = AutoConfig.from_pretrained(_a , **_a )
# It could be in `config.image_processor_type``
_a : Dict = getattr(_a , '''image_processor_type''' , _a )
if hasattr(_a , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
_a : Optional[Any] = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
_a : List[Any] = image_processor_class_from_name(_a )
_a : int = image_processor_auto_map is not None
_a : Optional[int] = image_processor_class is not None or type(_a ) in IMAGE_PROCESSOR_MAPPING
_a : Any = resolve_trust_remote_code(
_a , _a , _a , _a )
if has_remote_code and trust_remote_code:
_a : Any = get_class_from_dynamic_module(
_a , _a , **_a )
_a : Dict = kwargs.pop('''code_revision''' , _a )
if os.path.isdir(_a ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_a , **_a )
elif image_processor_class is not None:
return image_processor_class.from_dict(_a , **_a )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_a ) in IMAGE_PROCESSOR_MAPPING:
_a : str = IMAGE_PROCESSOR_MAPPING[type(_a )]
return image_processor_class.from_dict(_a , **_a )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
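
# Usage sketch (illustrative, not part of the original module). Any checkpoint whose
# config maps to a registered image processor works; the ConvNeXT checkpoint below is
# just one example of such a checkpoint:
#
#   processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#   print(type(processor).__name__)  # -> ConvNextImageProcessor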
| 15 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    """Computes perplexity of a list of input texts under a given causal language model."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True, ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 15 | 1 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    """Builds small Nezha configs/inputs and runs shape checks for each model head."""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
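
# Local-run sketch (illustrative; the path depends on your transformers checkout):
#   pytest tests/models/nezha/test_modeling_nezha.py -k "inference" -x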
| 15 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
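
# Lazy-import sketch (illustrative): with the structure above, the torch-backed
# submodule is only materialized on first attribute access:
#   from transformers.models import xmod   # cheap, no torch-heavy import yet
#   xmod.XmodModel                         # triggers the real import via _LazyModule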
| 15 | 1 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''ctrl''': 256,
}
CONTROL_CODES = {
'''Pregnancy''': 168629,
'''Christianity''': 7675,
'''Explain''': 106423,
'''Fitness''': 63440,
'''Saving''': 63163,
'''Ask''': 27171,
'''Ass''': 95985,
'''Joke''': 163509,
'''Questions''': 45622,
'''Thoughts''': 49605,
'''Retail''': 52342,
'''Feminism''': 164338,
'''Writing''': 11992,
'''Atheism''': 192263,
'''Netflix''': 48616,
'''Computing''': 39639,
'''Opinion''': 43213,
'''Alone''': 44967,
'''Funny''': 58917,
'''Gaming''': 40358,
'''Human''': 4088,
'''India''': 1331,
'''Joker''': 77138,
'''Diet''': 36206,
'''Legal''': 11859,
'''Norman''': 4939,
'''Tip''': 72689,
'''Weight''': 52343,
'''Movies''': 46273,
'''Running''': 23425,
'''Science''': 2090,
'''Horror''': 37793,
'''Confession''': 60572,
'''Finance''': 12250,
'''Politics''': 16360,
'''Scary''': 191985,
'''Support''': 12654,
'''Technologies''': 32516,
'''Teenage''': 66160,
'''Event''': 32769,
'''Learned''': 67460,
'''Notion''': 182770,
'''Wikipedia''': 37583,
'''Books''': 6665,
'''Extract''': 76050,
'''Confessions''': 102701,
'''Conspiracy''': 75932,
'''Links''': 63674,
'''Narcissus''': 150425,
'''Relationship''': 54766,
'''Relationships''': 134796,
'''Reviews''': 41671,
'''News''': 4256,
'''Translation''': 26820,
'''multilingual''': 128406,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is represented as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
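
# Example (illustrative): for the symbol tuple ("c", "a", "t", "</w>"), get_pairs
# returns {("c", "a"), ("a", "t"), ("t", "</w>")} -- the adjacent pairs whose
# merge ranks drive the BPE loop below.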
class CTRLTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for the Salesforce CTRL model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
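
    # Example (illustrative): given CTRL's merges, bpe("understand") returns a
    # string such as "under@@ stand"; the trailing "@@ " marks non-final subword
    # pieces and is stripped again in convert_tokens_to_string() below.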
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
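
# Round-trip sketch (illustrative; downloads the canonical CTRL vocab/merges):
#   tok = CTRLTokenizer.from_pretrained("ctrl")
#   ids = tok.encode("Diet Hello world")
#   print(tok.decode(ids))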
| 15 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
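
# Direct-usage sketch (illustrative): the same validation outside pytest.
#   readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#   readme.validate()  # raises ValueError when the expected structure is violated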
| 15 | 1 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BigBird."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , _a , _a="<unk>" , _a="<s>" , _a="</s>" , _a="<pad>" , _a="[SEP]" , _a="[MASK]" , _a="[CLS]" , _a = None , **_a , ) -> None:
_a : Tuple = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
_a : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
_a : str = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
_a : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
_a : Optional[int] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
_a : Union[str, Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
_a : str = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
_a : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sep_token=_a , mask_token=_a , cls_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_a : Optional[Any] = vocab_file
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, spaces_between_special_tokens=True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
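
    # Layout produced by the three sequence-pair helpers above (illustrative):
    #   tokens:          [CLS] A1 ... An [SEP] B1 ... Bm [SEP]
    #   token_type_ids:    0   0  ...  0   0    1  ...  1   1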
| 15 |
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of ``nums``.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 1 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    """Metric wrapper around `sklearn.metrics.recall_score`."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
| 15 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
a__ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
a__ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
a__ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
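
        # Post-conversion sanity sketch (illustrative): the saved state dict
        # should expose only the renamed key.
        #   sd = torch.load(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
        #   assert NEW_KEY in sd and OLD_KEY not in sd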
| 15 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    """Configuration class for LUKE models."""

    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
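
# Instantiation sketch (illustrative): the defaults mirror studio-ousia/luke-base.
#   config = LukeConfig()
#   print(config.entity_vocab_size, config.entity_emb_size)  # 500000 256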
| 15 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language-model text generation pipeline."""

    # Prefix text used to give XLNet and Transformer-XL more context on short prompts.
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]:
_a : List[Any] = {}
if prefix is not None:
_a : Optional[Any] = prefix
if prefix:
_a : Dict = self.tokenizer(
_a , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Tuple = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_a : Dict = handle_long_generation
preprocess_params.update(_a )
_a : Tuple = generate_kwargs
_a : Any = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_a : Any = ReturnType.TENSORS
if return_type is not None:
_a : Any = return_type
if clean_up_tokenization_spaces is not None:
_a : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a )
if len(_a ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_a : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowercase ( self , *_a , **_a ) -> Union[str, Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self , _a , **_a ) -> List[str]:
return super().__call__(_a , **_a )
def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]:
_a : Optional[int] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
_a : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a : int = generate_kwargs['''max_new_tokens''']
else:
_a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'''
''' model's max length''' )
_a : List[Any] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def __lowercase ( self , _a , **_a ) -> Optional[int]:
_a : Any = model_inputs['''input_ids''']
_a : Optional[Any] = model_inputs.get('''attention_mask''' , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
_a : int = None
_a : int = None
_a : List[str] = 1
else:
_a : List[Any] = input_ids.shape[0]
_a : Union[str, Any] = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_a : int = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_a : Tuple = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_a : Dict = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a )
_a : int = generated_sequence.shape[0]
if self.framework == "pt":
_a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int:
_a : Tuple = model_outputs['''generated_sequence'''][0]
_a : int = model_outputs['''input_ids''']
_a : Any = model_outputs['''prompt_text''']
_a : Any = generated_sequence.numpy().tolist()
_a : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_a : Optional[int] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_a : str = self.tokenizer.decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_a : Union[str, Any] = 0
else:
_a : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) )
if return_type == ReturnType.FULL_TEXT:
_a : str = prompt_text + text[prompt_length:]
else:
_a : List[str] = text[prompt_length:]
_a : Union[str, Any] = {'''generated_text''': all_text}
records.append(_a )
return records
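# A standalone sketch of the `handle_long_generation="hole"` strategy from
# `preprocess` above: when the prompt plus the requested new tokens cannot fit in
# the model window, only the rightmost slice of the prompt survives. The helper
# name is illustrative, not part of this pipeline's API.
def _hole_truncate_sketch(input_ids, max_new_tokens, model_max_length):
    if len(input_ids) + max_new_tokens > model_max_length:
        keep_length = model_max_length - max_new_tokens
        if keep_length <= 0:
            raise ValueError('''more new tokens requested than the model window can hold''')
        input_ids = input_ids[-keep_length:]
    return input_ids


assert _hole_truncate_sketch(list(range(1_0)), 4, 8) == [6, 7, 8, 9]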
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a__ = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
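# The `_LazyModule` above defers heavy imports until an attribute is first
# accessed. A minimal sketch of the same idea with PEP 562 module-level
# `__getattr__` (illustrative only: the real `_LazyModule` also swaps itself into
# `sys.modules`, supports `dir()`, and resolves dotted submodules):
#
#     import importlib
#
#     _lazy_structure = {"json": ["dumps"]}
#
#     def __getattr__(name):
#         for module_name, attrs in _lazy_structure.items():
#             if name in attrs:
#                 return getattr(importlib.import_module(module_name), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")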
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __a : Dict=None ) -> str:
"""simple docstring"""
if subparsers is not None:
_a : Union[str, Any] = subparsers.add_parser('''test''' )
else:
_a : List[str] = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' ,default=__a ,help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) ,)
if subparsers is not None:
parser.set_defaults(func=__a )
return parser
def __UpperCAmelCase ( __a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Dict = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
_a : List[Any] = script_name
else:
_a : Union[str, Any] = F"""--config_file={args.config_file} {script_name}"""
_a : str = ['''accelerate-launch'''] + test_args.split()
_a : str = execute_subprocess_async(__a ,env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = test_command_parser()
_a : List[Any] = parser.parse_args()
test_command(__a )
if __name__ == "__main__":
main()
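# Typical invocations (illustrative): `accelerate test` checks the default config,
# while `accelerate test --config_file path/to/config.yaml` validates a specific
# one. Either way, the command above expands the call to roughly
# `accelerate-launch <accelerate>/test_utils/scripts/test_script.py` and reports
# success when the subprocess exits with return code 0.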
| 15 | 1 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : List[str] = ArgumentParser('''Diffusers CLI tool''' ,usage='''diffusers-cli <command> [<args>]''' )
_a : Optional[Any] = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(__a )
# Let's go
_a : Tuple = parser.parse_args()
if not hasattr(__a ,'''func''' ):
parser.print_help()
exit(1 )
# Run
_a : Union[str, Any] = args.func(__a )
service.run()
if __name__ == "__main__":
main()
| 15 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : Any = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_a : str = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __lowercase ( self , **_a ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> str:
_a : List[str] = self.get_tokenizer()
_a : Tuple = self.get_image_processor()
_a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Dict:
_a : List[str] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Dict = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = self.prepare_image_inputs()
_a : List[Any] = image_processor(_a , return_tensors='''np''' )
_a : Dict = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Tuple = '''lower newer'''
_a : int = processor(text=_a )
_a : str = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> List[Any]:
_a : Any = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Union[str, Any] = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : int = processor.batch_decode(_a )
_a : int = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Tuple = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Optional[int] = '''lower newer'''
_a : Dict = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 15 | 1 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """simple docstring"""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
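# Python's built-in three-argument pow uses fast modular exponentiation as well;
# by Fermat's little theorem, b**(p - 2) % p is the inverse of b modulo the
# prime p, so this agrees with the recursive implementation above:
print((a * pow(b, p - 2, p)) % p == (a * binary_exponentiation(b, p - 2, p)) % p)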
| 15 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
_a : Optional[Any] = getattr(__a ,__a )
if weight_type is not None:
_a : Dict = getattr(__a ,__a ).shape
else:
_a : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : List[Any] = value
elif weight_type == "weight_g":
_a : Any = value
elif weight_type == "weight_v":
_a : Union[str, Any] = value
elif weight_type == "bias":
_a : Optional[int] = value
else:
_a : List[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = []
_a : Union[str, Any] = fairseq_model.state_dict()
_a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a : int = False
if "conv_layers" in name:
load_conv_layer(
__a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,)
_a : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
_a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
_a : Any = True
if "*" in mapped_key:
_a : Optional[int] = name.split(__a )[0].split('''.''' )[-2]
_a : Any = mapped_key.replace('''*''' ,__a )
if "weight_g" in name:
_a : List[Any] = '''weight_g'''
elif "weight_v" in name:
_a : List[str] = '''weight_v'''
elif "weight" in name:
_a : Any = '''weight'''
elif "bias" in name:
_a : str = '''bias'''
else:
_a : Any = None
set_recursively(__a ,__a ,__a ,__a ,__a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple:
"""simple docstring"""
_a : int = full_name.split('''conv_layers.''' )[-1]
_a : Any = name.split('''.''' )
_a : List[Any] = int(items[0] )
_a : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_a : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
_a : Tuple = HubertConfig.from_pretrained(__a )
else:
_a : Any = HubertConfig()
if is_finetuned:
if dict_path:
_a : Tuple = Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a : Any = target_dict.pad_index
_a : Tuple = target_dict.bos_index
_a : Optional[int] = target_dict.eos_index
_a : Optional[Any] = len(target_dict.symbols )
_a : Tuple = os.path.join(__a ,'''vocab.json''' )
if not os.path.isdir(__a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) )
return
os.makedirs(__a ,exist_ok=__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices ,__a )
_a : Tuple = WavaVecaCTCTokenizer(
__a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,)
_a : Tuple = True if config.feat_extract_norm == '''layer''' else False
_a : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,)
_a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a )
processor.save_pretrained(__a )
_a : Tuple = HubertForCTC(__a )
else:
_a : Tuple = HubertModel(__a )
if is_finetuned:
_a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a : Any = model[0].eval()
recursively_load_weights(__a ,__a ,__a )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
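# Typical invocation (script name and paths are illustrative):
#
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned
#
# Drop --not_finetuned (and pass --dict_path) when converting a fine-tuned CTC
# checkpoint, so that a tokenizer and processor are built alongside the model.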
| 15 | 1 |
def kinetic_energy(mass: float, velocity: float) -> float:
    """simple docstring"""
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''')
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 15 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"]
UpperCAmelCase__ : str = "ViltImageProcessor"
UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a=None , _a=None , **_a ) -> Any:
_a : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
_a : Dict = kwargs.pop('''feature_extractor''' )
_a : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
_a : int = self.image_processor
def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
_a : Tuple = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel_values + pixel_mask
_a : str = self.image_processor(_a , return_tensors=_a )
encoding.update(_a )
return encoding
def __lowercase ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> str:
return self.tokenizer.decode(*_a , **_a )
@property
def __lowercase ( self ) -> Optional[int]:
_a : str = self.tokenizer.model_input_names
_a : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowercase ( self ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __lowercase ( self ) -> Any:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
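# Illustrative usage, as a minimal sketch (the checkpoint name is an assumption;
# any ViLT checkpoint with a saved processor works the same way):
#
#     from transformers import ViltProcessor
#     from PIL import Image
#
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#     encoding = processor(images=Image.new("RGB", (384, 384)),
#                          text="How many cats are there?", return_tensors="pt")
#     # `encoding` merges input_ids/attention_mask from the tokenizer with
#     # pixel_values/pixel_mask from the image processor, as __call__ above does.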
| 15 | 1 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data['''data'''])
y = np.array(data['''target'''])
classes = data['''target_names''']

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """simple docstring"""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """simple docstring"""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
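# A vectorized alternative to the loop above (a sketch, not part of the original
# snippet): numpy broadcasts the subtraction, so all train-to-query distances come
# out of a single norm call.
def classifier_vectorized(train_data, train_target, classes, point, k=5):
    dists = np.linalg.norm(np.asarray(train_data) - np.asarray(point), axis=1)
    nearest = np.asarray(train_target)[np.argsort(dists)[:k]]
    return classes[Counter(nearest.tolist()).most_common(1)[0][0]]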
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 15 |
from math import ceil
def solution(n: int = 1_001) -> int:
    """simple docstring"""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
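# Why the update works: the i-th ring of the spiral has side s = 2*i + 1 and its
# four corners are s**2, s**2 - (s - 1), s**2 - 2*(s - 1) and s**2 - 3*(s - 1);
# their sum is 4*s**2 - 6*(s - 1) = 4*(2*i + 1)**2 - 6*(2*i), exactly the
# expression added to `total` on each iteration.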
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 15 | 1 |
def greatest_common_divisor(x: int, y: int) -> int:
    """simple docstring"""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """simple docstring"""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """simple docstring"""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
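# Worked example: solution(10) == 2_520, the smallest number evenly divisible by
# every integer from 1 to 10; each loop step folds the running answer with the
# next integer via lcm(a, b) = a * b // gcd(a, b).
assert solution(1_0) == 2_520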
| 15 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
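# Worked example (illustrative numbers): a pixel box [10, 20, 30, 40] on a
# 200 x 400 image maps to LayoutLM's 0-1000 coordinate space as
# [1000*10/200, 1000*20/400, 1000*30/200, 1000*40/400] = [50, 50, 150, 100].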
def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]:
"""simple docstring"""
_a : str = to_pil_image(__a )
_a , _a : Optional[Any] = pil_image.size
_a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a )
_a , _a , _a , _a , _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
_a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()]
_a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices]
_a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_a : int = []
for x, y, w, h in zip(__a ,__a ,__a ,__a ):
_a : List[str] = [x, y, x + w, y + h]
actual_boxes.append(__a )
# finally, normalize the bounding boxes
_a : Dict = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__a ,__a ,__a ) )
assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
| 15 | 1 |
import re
from filelock import FileLock
try:
import nltk
a__ = True
except (ImportError, ModuleNotFoundError):
a__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def __UpperCAmelCase ( __a : str ) -> str:
"""simple docstring"""
__a = re.sub('''<n>''' ,'''''' ,__a ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) )
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : int = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=__a )
_a : Optional[int] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__a )
env_command_parser(subparsers=__a )
launch_command_parser(subparsers=__a )
tpu_command_parser(subparsers=__a )
test_command_parser(subparsers=__a )
# Let's go
_a : Dict = parser.parse_args()
if not hasattr(__a ,'''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__a )
if __name__ == "__main__":
main()
| 15 | 1 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def __UpperCAmelCase ( __a : np.ndarray ) -> np.ndarray:
"""simple docstring"""
return input_array.reshape((input_array.size, 1) )
def __UpperCAmelCase ( __a : np.ndarray ,__a : np.ndarray ,__a : int ) -> np.ndarray:
"""simple docstring"""
_a : Dict = np.nan
for i in range(__a ):
_a : Union[str, Any] = features[:, labels == i]
_a : int = data.mean(1 )
# Centralize the data of class i
_a : Any = data - column_reshape(__a )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(__a ,centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_a : List[str] = np.dot(__a ,centered_data.T )
return covariance_sum / features.shape[1]
def __UpperCAmelCase ( __a : np.ndarray ,__a : np.ndarray ,__a : int ) -> np.ndarray:
"""simple docstring"""
_a : List[Any] = features.mean(1 )
_a : str = np.nan
for i in range(__a ):
_a : Dict = features[:, labels == i]
_a : Union[str, Any] = data.shape[1]
_a : List[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(__a ) - column_reshape(__a ) ,(column_reshape(__a ) - column_reshape(__a )).T ,)
else:
# If covariance_sum is np.nan (i.e. first loop)
_a : str = device_data * np.dot(
column_reshape(__a ) - column_reshape(__a ) ,(column_reshape(__a ) - column_reshape(__a )).T ,)
return covariance_sum / features.shape[1]
def __UpperCAmelCase ( __a : np.ndarray ,__a : int ) -> np.ndarray:
"""simple docstring"""
if features.any():
_a : Dict = features.mean(1 )
# Center the dataset
_a : Optional[int] = features - np.reshape(__a ,(data_mean.size, 1) )
_a : Tuple = np.dot(__a ,centered_data.T ) / features.shape[1]
_a , _a : Tuple = np.linalg.eigh(__a )
# Take all the columns in the reverse order (-1), and then takes only the first
_a : List[str] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_a : Tuple = np.dot(filtered_eigenvectors.T ,__a )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR ,format='''%(message)s''' ,force=__a )
logging.error('''Dataset empty''' )
raise AssertionError
def __UpperCAmelCase ( __a : np.ndarray ,__a : np.ndarray ,__a : int ,__a : int ) -> np.ndarray:
"""simple docstring"""
assert classes > dimensions
# Check if features have been already loaded
if features.any():
_a , _a : Optional[Any] = eigh(
covariance_between_classes(__a ,__a ,__a ) ,covariance_within_classes(__a ,__a ,__a ) ,)
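# `eigh(A, B)` from scipy solves the generalized eigenvalue problem A v = w B v;
# with A the between-class scatter and B the within-class scatter, the leading
# eigenvectors maximize the Fisher ratio of between- to within-class variance.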
_a : Optional[Any] = eigenvectors[:, ::-1][:, :dimensions]
_a , _a , _a : Any = np.linalg.svd(__a )
_a : Tuple = svd_matrix[:, 0:dimensions]
_a : Dict = np.dot(filtered_svd_matrix.T ,__a )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR ,format='''%(message)s''' ,force=__a )
logging.error('''Dataset empty''' )
raise AssertionError
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_a : str = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_a : Optional[Any] = np.array([0, 0, 0, 1, 1] )
_a : int = 2
_a : List[str] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__a ) as error_info:
_a : Tuple = linear_discriminant_analysis(
__a ,__a ,__a ,__a )
if isinstance(__a ,np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_a : int = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_a : Optional[int] = 2
_a : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(__a ) as error_info:
_a : Optional[Any] = principal_component_analysis(__a ,__a )
if not np.allclose(__a ,__a ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__ = random.Random()
def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any:
"""simple docstring"""
if rng is None:
_a : Dict = global_rng
_a : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : str = batch_size
_a : List[str] = min_seq_length
_a : str = max_seq_length
_a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] = spectrogram_length
_a : List[str] = feature_size
_a : List[Any] = num_audio_channels
_a : Tuple = hop_length
_a : Optional[int] = chunk_length
_a : int = sampling_rate
def __lowercase ( self ) -> Union[str, Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = TvltFeatureExtractor
def __lowercase ( self ) -> Dict:
_a : List[str] = TvltFeatureExtractionTester(self )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_a : Dict = self.feature_extraction_class.from_pretrained(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Union[str, Any] = feat_extract_second.to_dict()
_a : Any = dict_first.pop('''mel_filters''' )
_a : int = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
_a : List[str] = self.feature_extraction_class.from_json_file(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Dict = feat_extract_second.to_dict()
_a : str = dict_first.pop('''mel_filters''' )
_a : str = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize feature_extractor
_a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a : Union[str, Any] = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : int = np.asarray(_a )
_a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowercase ( self ) -> int:
_a : Union[str, Any] = self._load_datasamples(1 )
_a : int = TvltFeatureExtractor()
_a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 15 | 1 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a__ = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
a__ = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
a__ = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout: number of seconds after which a candidate program is stopped (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
a__ = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
a__ = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
def __lowercase ( self , _a , _a , _a=[1, 1_0, 1_0_0] , _a=4 , _a=3.0 ) -> Tuple:
if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('''This metric is currently not supported on Windows.''' )
with ThreadPoolExecutor(max_workers=_a ) as executor:
_a : Dict = []
_a : str = Counter()
_a : Any = 0
_a : List[str] = defaultdict(_a )
for task_id, (candidates, test_case) in enumerate(zip(_a , _a ) ):
for candidate in candidates:
_a : Any = candidate + '''\n''' + test_case
_a : Optional[Any] = (test_program, timeout, task_id, completion_id[task_id])
_a : Dict = executor.submit(_a , *_a )
futures.append(_a )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(_a ):
_a : Any = future.result()
results[result["task_id"]].append((result['''completion_id'''], result) )
_a , _a : Dict = [], []
for result in results.values():
result.sort()
_a : str = [r[1]['''passed'''] for r in result]
total.append(len(_a ) )
correct.append(sum(_a ) )
_a : str = np.array(_a )
_a : int = np.array(_a )
_a : List[str] = k
_a : Dict = {F"""pass@{k}""": estimate_pass_at_k(_a , _a , _a ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def __UpperCAmelCase ( __a : Tuple ,__a : List[str] ,__a : Any ) -> Tuple:
"""simple docstring"""
def estimator(__a : int ,__a : int ,__a : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 ,n + 1 ) )
if isinstance(__a ,__a ):
_a : List[str] = itertools.repeat(__a ,len(__a ) )
else:
assert len(__a ) == len(__a )
_a : Optional[Any] = iter(__a )
return np.array([estimator(int(__a ) ,int(__a ) ,__a ) for n, c in zip(__a ,__a )] )
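# Worked example of the unbiased pass@k estimator above: with n = 2 generated
# samples of which c = 1 passes, pass@1 = 1 - prod(1 - 1/arange(2, 3)) =
# 1 - (1 - 1/2) = 0.5, and pass@2 = 1.0 since n - c < k, matching the
# {'pass@1': 0.5, 'pass@2': 1.0} doctest in _KWARGS_DESCRIPTION.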
| 15 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
@add_end_docstrings(
__lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self , _a ) -> np.ndarray:
if self.framework == "tf":
_a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __lowercase ( self , _a ) -> np.ndarray:
_a : int = self.get_masked_index(_a )
_a : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __lowercase ( self , _a ) -> Optional[int]:
if isinstance(_a , _a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]:
if return_tensors is None:
_a : Union[str, Any] = self.framework
_a : str = self.tokenizer(_a , return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[str] = self.model(**_a )
_a : Any = model_inputs['''input_ids''']
return model_outputs
def __lowercase ( self , _a , _a=5 , _a=None ) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_a : List[Any] = target_ids.shape[0]
_a : Any = model_outputs['''input_ids'''][0]
_a : List[str] = model_outputs['''logits''']
if self.framework == "tf":
_a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_a : List[str] = outputs.numpy()
_a : Dict = outputs[0, masked_index, :]
_a : str = stable_softmax(_a , axis=-1 )
if target_ids is not None:
_a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) )
_a : Union[str, Any] = tf.expand_dims(_a , 0 )
_a : Optional[int] = tf.math.top_k(_a , k=_a )
_a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
_a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_a : List[str] = outputs[0, masked_index, :]
_a : List[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_a : List[Any] = probs[..., target_ids]
_a , _a : Optional[Any] = probs.topk(_a )
_a : Dict = []
_a : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_a : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_a : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
_a : Tuple = target_ids[p].tolist()
_a : List[str] = p
# Filter padding out:
_a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Normally we skip special tokens to give readable output. For multi-mask
# inputs, though, the other [MASK] tokens would be removed as well, making
# the output look odd, so we add them back.
_a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a )
_a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
def __lowercase ( self , _a , _a=None ) -> Dict:
if isinstance(_a , _a ):
_a : Tuple = [targets]
try:
_a : int = self.tokenizer.get_vocab()
except Exception:
_a : Any = {}
_a : List[Any] = []
for target in targets:
_a : List[Any] = vocab.get(_a , _a )
if id_ is None:
_a : Tuple = self.tokenizer(
_a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids''']
if len(_a ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it.''' )
continue
_a : Tuple = input_ids[0]
# XXX: If users hit this path, tokenization becomes pretty slow, so warn
# them; the warning lets them fix their inputs to get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
_a : List[str] = list(set(_a ) )
if len(_a ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
_a : int = np.array(_a )
return target_ids
def __lowercase ( self , _a=None , _a=None ) -> Tuple:
_a : str = {}
if targets is not None:
_a : List[Any] = self.get_target_ids(_a , _a )
_a : Optional[Any] = target_ids
if top_k is not None:
_a : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _a , *_a , **_a ) -> int:
_a : Optional[Any] = super().__call__(_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
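if __name__ == "__main__":
    # Minimal usage sketch (illustrative only; assumes a standard
    # `transformers` install and that the example checkpoint can be
    # downloaded -- the model name is an example, not mandated by this file):
    from transformers import pipeline

    unmasker = pipeline('''fill-mask''' , model='''distilroberta-base''' )
    print(unmasker('''Paris is the <mask> of France.''' , top_k=2 ) )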
| 15 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
a__ = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
a__ = None
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_a : List[Any] = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' ,metavar='''data.json''' ,help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' ,metavar='''pred.json''' ,help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' ,'''-o''' ,metavar='''eval.json''' ,help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' ,'''-n''' ,metavar='''na_prob.json''' ,help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' ,'''-t''' ,type=__a ,default=1.0 ,help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' ,)
parser.add_argument(
'''--out-image-dir''' ,'''-p''' ,metavar='''out_images''' ,default=__a ,help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' ,'''-v''' ,action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __UpperCAmelCase ( __a : Any ) -> List[str]:
"""simple docstring"""
_a : Tuple = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_a : List[Any] = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __UpperCAmelCase ( __a : List[Any] ) -> Dict:
"""simple docstring"""
def remove_articles(__a : Union[str, Any] ):
return ARTICLES_REGEX.sub(''' ''' ,__a )
def white_space_fix(__a : Dict ):
return " ".join(text.split() )
def remove_punc(__a : List[str] ):
_a : Optional[int] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__a : Any ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) )
def __UpperCAmelCase ( __a : Tuple ) -> Optional[int]:
"""simple docstring"""
if not s:
return []
return normalize_answer(__a ).split()
def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ) -> List[str]:
"""simple docstring"""
return int(normalize_answer(__a ) == normalize_answer(__a ) )
def __UpperCAmelCase ( __a : Dict ,__a : int ) -> str:
"""simple docstring"""
_a : Dict = get_tokens(__a )
_a : Any = get_tokens(__a )
_a : Dict = collections.Counter(__a ) & collections.Counter(__a )
_a : Optional[int] = sum(common.values() )
if len(__a ) == 0 or len(__a ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
_a : str = 1.0 * num_same / len(__a )
_a : Dict = 1.0 * num_same / len(__a )
_a : List[str] = (2 * precision * recall) / (precision + recall)
return fa
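# Illustrative worked example (added for clarity; the helper below exists
# only for illustration): "The Eiffel Tower" and "Eiffel Tower" both
# normalize to "eiffel tower" (articles, case, and punctuation are
# stripped), so exact match is 1. Against the prediction "the tower", only
# "tower" overlaps with the two gold tokens, giving F1 = 2 * 1 / (2 + 1) = 2/3.
def _squad_score_worked_example() -> None:
    assert compute_exact('''The Eiffel Tower''' , '''Eiffel Tower''' ) == 1
    assert abs(compute_fa('''The Eiffel Tower''' , '''the tower''' ) - 2 / 3 ) < 1e-9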
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_a : Dict = {}
_a : Optional[Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_a : Optional[int] = qa['''id''']
_a : str = [t for t in qa['''answers''']['''text'''] if normalize_answer(__a )]
if not gold_answers:
# For unanswerable questions, the only correct answer is the empty string
_a : Tuple = ['''''']
if qid not in preds:
print(F"""Missing prediction for {qid}""" )
continue
_a : Any = preds[qid]
# Take max over all gold answers
_a : Tuple = max(compute_exact(__a ,__a ) for a in gold_answers )
_a : str = max(compute_fa(__a ,__a ) for a in gold_answers )
return exact_scores, fa_scores
def __UpperCAmelCase ( __a : Any ,__a : List[str] ,__a : Dict ,__a : Optional[int] ) -> Dict:
"""simple docstring"""
_a : List[str] = {}
for qid, s in scores.items():
_a : str = na_probs[qid] > na_prob_thresh
if pred_na:
_a : Tuple = float(not qid_to_has_ans[qid] )
else:
_a : Any = s
return new_scores
def __UpperCAmelCase ( __a : Optional[int] ,__a : Optional[int] ,__a : Tuple=None ) -> List[str]:
"""simple docstring"""
if not qid_list:
_a : List[str] = len(__a )
return collections.OrderedDict(
[
('''exact''', 1_00.0 * sum(exact_scores.values() ) / total),
('''f1''', 1_00.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
_a : Optional[Any] = len(__a )
return collections.OrderedDict(
[
('''exact''', 1_00.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 1_00.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __UpperCAmelCase ( __a : Dict ,__a : Union[str, Any] ,__a : Tuple ) -> Any:
"""simple docstring"""
for k in new_eval:
_a : Any = new_eval[k]
def __UpperCAmelCase ( __a : List[str] ,__a : Dict ,__a : int ,__a : Optional[int] ) -> str:
"""simple docstring"""
plt.step(__a ,__a ,color='''b''' ,alpha=0.2 ,where='''post''' )
plt.fill_between(__a ,__a ,step='''post''' ,alpha=0.2 ,color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__a )
plt.savefig(__a )
plt.clf()
def __UpperCAmelCase ( __a : Optional[int] ,__a : List[Any] ,__a : int ,__a : Union[str, Any] ,__a : Any=None ,__a : str=None ) -> str:
"""simple docstring"""
_a : Any = sorted(__a ,key=lambda k : na_probs[k] )
_a : int = 0.0
_a : Optional[Any] = 1.0
_a : int = 0.0
_a : Any = [1.0]
_a : str = [0.0]
_a : Optional[Any] = 0.0
for i, qid in enumerate(__a ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_a : List[str] = true_pos / float(i + 1 )
_a : List[Any] = true_pos / float(__a )
if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__a )
recalls.append(__a )
if out_image:
plot_pr_curve(__a ,__a ,__a ,__a )
return {"ap": 1_00.0 * avg_prec}
def __UpperCAmelCase ( __a : List[Any] ,__a : Any ,__a : Dict ,__a : Dict ,__a : Optional[int] ,__a : Union[str, Any] ) -> Dict:
"""simple docstring"""
if out_image_dir and not os.path.exists(__a ):
os.makedirs(__a )
_a : Union[str, Any] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
_a : int = make_precision_recall_eval(
__a ,__a ,__a ,__a ,out_image=os.path.join(__a ,'''pr_exact.png''' ) ,title='''Precision-Recall curve for Exact Match score''' ,)
_a : Tuple = make_precision_recall_eval(
__a ,__a ,__a ,__a ,out_image=os.path.join(__a ,'''pr_f1.png''' ) ,title='''Precision-Recall curve for F1 score''' ,)
_a : Union[str, Any] = {k: float(__a ) for k, v in qid_to_has_ans.items()}
_a : Dict = make_precision_recall_eval(
__a ,__a ,__a ,__a ,out_image=os.path.join(__a ,'''pr_oracle.png''' ) ,title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' ,)
merge_eval(__a ,__a ,'''pr_exact''' )
merge_eval(__a ,__a ,'''pr_f1''' )
merge_eval(__a ,__a ,'''pr_oracle''' )
def __UpperCAmelCase ( __a : str ,__a : Optional[int] ,__a : Optional[Any] ,__a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if not qid_list:
return
_a : List[str] = [na_probs[k] for k in qid_list]
_a : Optional[int] = np.ones_like(__a ) / float(len(__a ) )
plt.hist(__a ,weights=__a ,bins=20 ,range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(F"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(__a ,F"""na_prob_hist_{name}.png""" ) )
plt.clf()
def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ,__a : Union[str, Any] ,__a : Optional[Any] ) -> str:
"""simple docstring"""
_a : List[Any] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
_a : Dict = num_no_ans
_a : List[Any] = cur_score
_a : str = 0.0
_a : Tuple = sorted(__a ,key=lambda k : na_probs[k] )
for i, qid in enumerate(__a ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
_a : List[str] = scores[qid]
else:
if preds[qid]:
_a : Tuple = -1
else:
_a : Union[str, Any] = 0
cur_score += diff
if cur_score > best_score:
_a : Dict = cur_score
_a : List[str] = na_probs[qid]
return 1_00.0 * best_score / len(__a ), best_thresh
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : List[str] ,__a : str ,__a : Optional[int] ,__a : List[str] ) -> Dict:
"""simple docstring"""
_a , _a : str = find_best_thresh(__a ,__a ,__a ,__a )
_a , _a : List[str] = find_best_thresh(__a ,__a ,__a ,__a )
_a : Union[str, Any] = best_exact
_a : Union[str, Any] = exact_thresh
_a : int = best_fa
_a : int = fa_thresh
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
with open(OPTS.data_file ) as f:
_a : Dict = json.load(__a )
_a : Dict = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
_a : Any = json.load(__a )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
_a : List[Any] = json.load(__a )
else:
_a : Optional[int] = {k: 0.0 for k in preds}
_a : List[str] = make_qid_to_has_ans(__a ) # maps qid to True/False
_a : Dict = [k for k, v in qid_to_has_ans.items() if v]
_a : Any = [k for k, v in qid_to_has_ans.items() if not v]
_a , _a : Dict = get_raw_scores(__a ,__a )
_a : Any = apply_no_ans_threshold(__a ,__a ,__a ,OPTS.na_prob_thresh )
_a : str = apply_no_ans_threshold(__a ,__a ,__a ,OPTS.na_prob_thresh )
_a : Optional[int] = make_eval_dict(__a ,__a )
if has_ans_qids:
_a : Tuple = make_eval_dict(__a ,__a ,qid_list=__a )
merge_eval(__a ,__a ,'''HasAns''' )
if no_ans_qids:
_a : str = make_eval_dict(__a ,__a ,qid_list=__a )
merge_eval(__a ,__a ,'''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(__a ,__a ,__a ,__a ,__a ,__a )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__a ,__a ,__a ,__a ,__a ,OPTS.out_image_dir )
histogram_na_prob(__a ,__a ,OPTS.out_image_dir ,'''hasAns''' )
histogram_na_prob(__a ,__a ,OPTS.out_image_dir ,'''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file ,'''w''' ) as f:
json.dump(__a ,__a )
else:
print(json.dumps(__a ,indent=2 ) )
if __name__ == "__main__":
a__ = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
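# Example invocation (illustrative; the script name and file paths are
# placeholders, the flags are the ones defined by the parser above):
#
#     python evaluate_squad_v2.py data.json pred.json \
#         --na-prob-file na_prob.json --out-file eval.json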
| 15 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
a__ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
a__ = logging.getLogger()
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_a : Any = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_a : Dict = parser.parse_args()
return args.f
def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any:
"""simple docstring"""
_a : Any = os.path.join(__a ,F"""{split}_results.json""" )
if os.path.exists(__a ):
with open(__a ,'''r''' ) as f:
return json.load(__a )
raise ValueError(F"""can't find {path}""" )
a__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
_a : Any = self.get_auto_remove_tmp_dir()
_a : Optional[Any] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_glue.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __lowercase ( self ) -> Dict:
_a : Tuple = self.get_auto_remove_tmp_dir()
_a : Tuple = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_clm_flax.main()
_a : List[str] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
def __lowercase ( self ) -> Optional[int]:
_a : str = self.get_auto_remove_tmp_dir()
_a : Optional[int] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(_a , '''argv''' , _a ):
run_summarization_flax.main()
_a : Optional[int] = get_results(_a , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __lowercase ( self ) -> Tuple:
_a : List[str] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = self.get_auto_remove_tmp_dir()
_a : int = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_ta_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __lowercase ( self ) -> Optional[Any]:
# With so little data, distributed training needs more epochs to reach a score on par with 0/1-GPU runs
_a : Any = 7 if get_gpu_count() > 1 else 2
_a : List[Any] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_ner.main()
_a : Dict = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __lowercase ( self ) -> Any:
_a : Optional[int] = self.get_auto_remove_tmp_dir()
_a : Union[str, Any] = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_qa.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
| 15 | 1 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = "EncodecFeatureExtractor"
UpperCAmelCase__ : str = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , _a , _a ) -> Dict:
super().__init__(_a , _a )
_a : int = self.feature_extractor
_a : Optional[int] = False
def __lowercase ( self , _a=None , _a=None , _a=True ) -> List[str]:
return self.tokenizer.get_decoder_prompt_ids(task=_a , language=_a , no_timestamps=_a )
def __call__( self , *_a , **_a ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
_a : Any = kwargs.pop('''audio''' , _a )
_a : Tuple = kwargs.pop('''sampling_rate''' , _a )
_a : List[Any] = kwargs.pop('''text''' , _a )
if len(_a ) > 0:
_a : Any = args[0]
_a : Tuple = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
_a : Any = self.tokenizer(_a , **_a )
if audio is not None:
_a : Tuple = self.feature_extractor(_a , *_a , sampling_rate=_a , **_a )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_a : Any = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
_a : List[Any] = audio_inputs['''padding_mask''']
return inputs
def __lowercase ( self , *_a , **_a ) -> Optional[int]:
_a : Tuple = kwargs.pop('''audio''' , _a )
_a : List[Any] = kwargs.pop('''padding_mask''' , _a )
if len(_a ) > 0:
_a : List[str] = args[0]
_a : Optional[Any] = args[1:]
if audio_values is not None:
return self._decode_audio(_a , padding_mask=_a )
else:
return self.tokenizer.batch_decode(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> Any:
return self.tokenizer.decode(*_a , **_a )
def __lowercase ( self , _a , _a = None ) -> List[np.ndarray]:
_a : Any = to_numpy(_a )
_a , _a , _a : Optional[int] = audio_values.shape
if padding_mask is None:
return list(_a )
_a : List[str] = to_numpy(_a )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_a : Optional[Any] = seq_len - padding_mask.shape[-1]
_a : Dict = 1 - self.feature_extractor.padding_value
_a : Dict = np.pad(_a , ((0, 0), (0, difference)) , '''constant''' , constant_values=_a )
_a : Optional[Any] = audio_values.tolist()
for i in range(_a ):
_a : Any = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_a : Optional[int] = sliced_audio.reshape(_a , -1 )
return audio_values
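# Self-contained sketch of the padding-mask slicing performed above (added
# for clarity; the shapes and values are made up, and `padding_value` is
# assumed to be 0):
def _decode_audio_sketch() -> np.ndarray:
    audio_values = np.arange(10.0 ).reshape(1 , 1 , 10 )  # (bsz=1, channels=1, seq_len=10)
    padding_mask = np.array([[1.0] * 6 + [0.0] * 4] )  # the last 4 steps are padding
    sliced = np.asarray(audio_values[0] )[padding_mask[0][None, :] != 0.0]
    return sliced.reshape(1 , -1 )  # shape (1, 6): padding removed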
| 15 |
import argparse
import os
import re
import packaging.version
a__ = '''examples/'''
a__ = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a__ = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
a__ = '''README.md'''
def __UpperCAmelCase ( __a : List[str] ,__a : int ,__a : Optional[Any] ) -> int:
"""simple docstring"""
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Tuple = f.read()
_a , _a : str = REPLACE_PATTERNS[pattern]
_a : List[str] = replace.replace('''VERSION''' ,__a )
_a : List[Any] = re_pattern.sub(__a ,__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.write(__a )
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(__a ):
# Remove folders with examples that are no longer actively maintained from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__a ,__a ) ,__a ,pattern='''examples''' )
def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__a ,__a ,__a )
if not patch:
update_version_in_examples(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_a : Optional[Any] = '''🤗 Transformers currently provides the following architectures'''
_a : str = '''1. Want to contribute a new model?'''
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Optional[int] = f.readlines()
# Find the start of the list.
_a : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_a : List[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
_a : Tuple = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,)
index += 1
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.writelines(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with open(REPLACE_FILES['''init'''] ,'''r''' ) as f:
_a : Optional[Any] = f.read()
_a : Optional[Any] = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def __UpperCAmelCase ( __a : Dict=False ) -> str:
"""simple docstring"""
_a : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
_a : List[Any] = default_version.base_version
elif patch:
_a : str = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_a : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_a : Dict = input(F"""Which version are you releasing? [{default_version}]""" )
if len(__a ) == 0:
_a : int = default_version
print(F"""Updating version to {version}.""" )
global_version_update(__a ,patch=__a )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
_a : str = get_version()
_a : int = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_a : List[Any] = current_version.base_version
# Check with the user we got that right.
_a : Union[str, Any] = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(__a ) == 0:
_a : List[str] = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(__a )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
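# Quick illustration of how the REPLACE_PATTERNS table works (added for
# clarity; the sample string and helper name are made up):
def _replace_patterns_demo() -> str:
    re_pattern , replace = REPLACE_PATTERNS['''init''']
    sample = '''__version__ = "4.28.0.dev0"'''
    return re_pattern.sub(replace.replace('''VERSION''' , '''4.28.0''' ) , sample )
    # -> '__version__ = "4.28.0"\n'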
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 15 | 1 |
| 15 |
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
if n == 1 or not isinstance(__a ,__a ):
return 0
elif n == 2:
return 1
else:
_a : Any = [0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
_a : Any = 0
_a : Dict = 2
while digits < n:
index += 1
_a : Dict = len(str(fibonacci(__a ) ) )
return index
def __UpperCAmelCase ( __a : int = 1_000 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(__a )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
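# Illustrative sanity check (added for clarity; the helper is hypothetical):
# the first Fibonacci number with 3 digits is F(12) = 144, so `solution(3)`
# returns 12; for the full Project Euler 25 problem, `solution(1_000)`
# returns 4782.
def _solution_sanity_check() -> None:
    assert solution(3 ) == 12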
| 15 | 1 |
a__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 15 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a__ = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
a__ = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
a__ = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthews correlation coefficient
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __UpperCAmelCase ( __a : int ,__a : List[str] ) -> Optional[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ,__a : List[str]="binary" ) -> Optional[int]:
"""simple docstring"""
_a : List[str] = simple_accuracy(__a ,__a )
_a : Any = float(fa_score(y_true=__a ,y_pred=__a ,average=__a ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ) -> List[Any]:
"""simple docstring"""
_a : Union[str, Any] = {}
for id_pred, label in zip(__a ,__a ):
_a : Optional[int] = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
_a : Optional[Any] = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_a : str = [(pred, label)]
_a , _a : Any = [], []
for question, preds_labels in question_map.items():
_a , _a : Any = zip(*__a )
_a : List[Any] = fa_score(y_true=__a ,y_pred=__a ,average='''macro''' )
fas.append(__a )
_a : List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(__a ) )
ems.append(__a )
_a : List[str] = float(sum(__a ) / len(__a ) )
_a : str = sum(__a ) / len(__a )
_a : Optional[int] = float(fa_score(y_true=__a ,y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def __lowercase ( self ) -> Any:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def __lowercase ( self , _a , _a ) -> Optional[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_a , _a )}
elif self.config_name == "cb":
return acc_and_fa(_a , _a , fa_avg='''macro''' )
elif self.config_name == "record":
_a : Any = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
_a : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(_a , _a )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_a , _a )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_a , _a )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 15 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
_a : Optional[Any] = getattr(__a ,__a )
if weight_type is not None:
_a : Dict = getattr(__a ,__a ).shape
else:
_a : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : List[Any] = value
elif weight_type == "weight_g":
_a : Any = value
elif weight_type == "weight_v":
_a : Union[str, Any] = value
elif weight_type == "bias":
_a : Optional[int] = value
else:
_a : List[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = []
_a : Union[str, Any] = fairseq_model.state_dict()
_a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a : int = False
if "conv_layers" in name:
load_conv_layer(
__a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,)
_a : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
_a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
_a : Any = True
if "*" in mapped_key:
_a : Optional[int] = name.split(__a )[0].split('''.''' )[-2]
_a : Any = mapped_key.replace('''*''' ,__a )
if "weight_g" in name:
_a : List[Any] = '''weight_g'''
elif "weight_v" in name:
_a : List[str] = '''weight_v'''
elif "weight" in name:
_a : Any = '''weight'''
elif "bias" in name:
_a : str = '''bias'''
else:
_a : Any = None
set_recursively(__a ,__a ,__a ,__a ,__a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple:
"""simple docstring"""
_a : int = full_name.split('''conv_layers.''' )[-1]
_a : Any = name.split('''.''' )
_a : List[Any] = int(items[0] )
_a : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_a : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
_a : Tuple = HubertConfig.from_pretrained(__a )
else:
_a : Any = HubertConfig()
if is_finetuned:
if dict_path:
_a : Tuple = Dictionary.load(__a )
# Important: change the bos & pad token ids, since the CTC blank symbol is
# <pad> and not <s> as in fairseq
_a : Any = target_dict.pad_index
_a : Tuple = target_dict.bos_index
_a : Optional[int] = target_dict.eos_index
_a : Optional[Any] = len(target_dict.symbols )
_a : Tuple = os.path.join(__a ,'''vocab.json''' )
if not os.path.isdir(__a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) )
return
os.makedirs(__a ,exist_ok=__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices ,__a )
_a : Tuple = WavaVecaCTCTokenizer(
__a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,)
_a : Tuple = True if config.feat_extract_norm == '''layer''' else False
_a : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,)
_a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a )
processor.save_pretrained(__a )
_a : Tuple = HubertForCTC(__a )
else:
_a : Tuple = HubertModel(__a )
if is_finetuned:
_a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a : Any = model[0].eval()
recursively_load_weights(__a ,__a ,__a )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
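    # Typical invocation (all paths below are placeholders, not from this repo):
    #   python <this_script>.py \
    #       --checkpoint_path /path/to/hubert_checkpoint.pt \
    #       --pytorch_dump_folder_path ./hubert-hf \
    #       --dict_path /path/to/dict.ltr.txt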
| 15 |
import numpy as np
def __UpperCAmelCase ( __a : np.ndarray ,__a : np.ndarray ,__a : float = 1E-12 ,__a : int = 100 ,) -> tuple[float, np.ndarray]:
"""simple docstring"""
assert np.shape(__a )[0] == np.shape(__a )[1]
# Ensure proper dimensionality.
assert np.shape(__a )[0] == np.shape(__a )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(__a ) == np.iscomplexobj(__a )
_a : List[str] = np.iscomplexobj(__a )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(__a ,input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_a : List[str] = False
_a : List[str] = 0
_a : Tuple = 0
_a : str = 1E12
while not convergence:
# Multiple matrix by the vector.
_a : str = np.dot(__a ,__a )
# Normalize the resulting output vector.
_a : List[Any] = w / np.linalg.norm(__a )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_a : Dict = vector.conj().T if is_complex else vector.T
_a : Tuple = np.dot(__a ,np.dot(__a ,__a ) )
# Check convergence.
_a : List[str] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_a : Dict = True
_a : str = lambda_
if is_complex:
_a : Tuple = np.real(lambda_ )
return lambda_, vector
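# Worked example (illustrative, not part of the original file): for the
# symmetric matrix [[2, 1], [1, 2]] with eigenvalues 3 and 1, starting from
# [1, 0] the loop converges to the dominant pair (lambda = 3, v = [1, 1] /
# sqrt(2)); the Rayleigh-quotient error shrinks roughly by (1/3) ** 2 per step.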
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_a : List[str] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_a : int = np.array([41, 4, 20] )
    _a : Optional[Any] = real_input_matrix.astype(np.complex128 )
_a : int = np.triu(1j * complex_input_matrix ,1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
    _a : Union[str, Any] = np.array([41, 4, 20] ).astype(np.complex128 )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_a : Optional[int] = real_input_matrix
_a : Union[str, Any] = real_vector
elif problem_type == "complex":
_a : str = complex_input_matrix
_a : str = complex_vector
# Our implementation.
_a , _a : Optional[Any] = power_iteration(__a ,__a )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_a , _a : List[str] = np.linalg.eigh(__a )
# Last eigenvalue is the maximum one.
_a : Tuple = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_a : List[Any] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(__a ) - np.abs(__a ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 15 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def __UpperCAmelCase ( __a : str ,__a : Dict ,__a : Optional[Any]=None ,__a : Optional[Any]=None ,__a : str=None ,__a : int=None ,__a : Optional[Any]=None ,__a : Union[str, Any]=None ,) -> Optional[Any]:
"""simple docstring"""
if attention_mask is None:
_a : Union[str, Any] = np.where(input_ids != config.pad_token_id ,1 ,0 )
if decoder_attention_mask is None:
_a : str = np.where(decoder_input_ids != config.pad_token_id ,1 ,0 )
if head_mask is None:
_a : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_a : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_a : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=7 , _a=True , _a=False , _a=9_9 , _a=1_6 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=3_2 , _a=2 , _a=1 , _a=0 , _a=0.02 , ) -> List[str]:
_a : List[Any] = parent
_a : List[Any] = batch_size
_a : List[str] = seq_length
_a : Any = is_training
_a : Tuple = use_labels
_a : List[Any] = vocab_size
_a : Union[str, Any] = hidden_size
_a : Optional[Any] = num_hidden_layers
_a : Tuple = num_attention_heads
_a : Union[str, Any] = intermediate_size
_a : Dict = hidden_act
_a : Any = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : List[Any] = max_position_embeddings
_a : List[Any] = eos_token_id
_a : Optional[Any] = pad_token_id
_a : Any = bos_token_id
_a : Any = initializer_range
def __lowercase ( self ) -> Dict:
_a : int = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_a : Optional[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_a : Tuple = shift_tokens_right(_a , 1 , 2 )
_a : Optional[int] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_a , )
_a : Tuple = prepare_blenderbot_inputs_dict(_a , _a , _a )
return config, inputs_dict
def __lowercase ( self ) -> List[str]:
_a , _a : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowercase ( self , _a , _a , _a ) -> str:
_a : Optional[int] = 2_0
_a : Union[str, Any] = model_class_name(_a )
_a : Optional[Any] = model.encode(inputs_dict['''input_ids'''] )
_a , _a : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_a : Any = model.init_cache(decoder_input_ids.shape[0] , _a , _a )
_a : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
_a : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_a : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , )
_a : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_a : Any = model.decode(
decoder_input_ids[:, -1:] , _a , decoder_attention_mask=_a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_a , )
_a : Union[str, Any] = model.decode(_a , _a )
_a : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : Union[str, Any] = 2_0
_a : str = model_class_name(_a )
_a : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
_a , _a : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_a : str = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_a : Tuple = model.init_cache(decoder_input_ids.shape[0] , _a , _a )
_a : List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_a : List[str] = model.decode(
decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , )
_a : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_a : List[str] = model.decode(
decoder_input_ids[:, -1:] , _a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_a , decoder_position_ids=_a , )
_a : Dict = model.decode(_a , _a , decoder_attention_mask=_a )
_a : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = 99
def __lowercase ( self ) -> List[str]:
_a : List[Any] = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_a : List[str] = input_ids.shape[0]
_a : Dict = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __lowercase ( self ) -> int:
_a , _a , _a : List[Any] = self._get_config_and_data()
_a : Optional[Any] = FlaxBlenderbotForConditionalGeneration(_a )
_a : Dict = lm_model(input_ids=_a )
_a : Optional[int] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _a )
def __lowercase ( self ) -> Dict:
_a : str = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_a : Optional[Any] = FlaxBlenderbotForConditionalGeneration(_a )
_a : List[str] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_a : Optional[Any] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_a : Tuple = lm_model(input_ids=_a , decoder_input_ids=_a )
_a : Optional[Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
_a : List[str] = shift_tokens_right(_a , 1 , 2 )
_a : Union[str, Any] = np.equal(_a , 1 ).astype(np.floataa ).sum()
_a : int = np.equal(_a , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_a , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
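        # What shift_tokens_right does here (worked example, values from the
        # test above): it drops the last position and prepends the decoder
        # start token 2, so [71, 82, 18, 33, 2, 1, 1] becomes
        # [2, 71, 82, 18, 33, 2, 1], leaving one pad token (id 1) fewer in
        # total, which is exactly what the assertions check.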
@require_flax
class UpperCAmelCase_ ( __lowercase , unittest.TestCase , __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[Any] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
UpperCAmelCase__ : Tuple = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[int] = FlaxBlenderbotModelTester(self )
def __lowercase ( self ) -> int:
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_a , _a , _a )
def __lowercase ( self ) -> Tuple:
_a , _a : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_a , _a , _a )
def __lowercase ( self ) -> str:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_a : List[Any] = self._prepare_for_class(_a , _a )
_a : Optional[Any] = model_class(_a )
@jax.jit
def encode_jitted(_a , _a=None , **_a ):
return model.encode(input_ids=_a , attention_mask=_a )
with self.subTest('''JIT Enabled''' ):
_a : Optional[Any] = encode_jitted(**_a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_a : Tuple = encode_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowercase ( self ) -> Dict:
_a , _a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_a : List[str] = model_class(_a )
_a : List[Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
_a : Union[str, Any] = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_a , _a , _a ):
return model.decode(
decoder_input_ids=_a , decoder_attention_mask=_a , encoder_outputs=_a , )
with self.subTest('''JIT Enabled''' ):
_a : Optional[int] = decode_jitted(**_a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_a : Dict = decode_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowercase ( self ) -> Tuple:
for model_class_name in self.all_model_classes:
_a : str = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_a : Union[str, Any] = np.ones((1, 1) ) * model.config.eos_token_id
_a : str = model(_a )
self.assertIsNotNone(_a )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def __lowercase ( self ) -> Any:
_a : Tuple = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5}
_a : Union[str, Any] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
_a : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_a )
_a : str = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
_a : List[Any] = ['''Sam''']
_a : str = tokenizer(_a , return_tensors='''jax''' )
_a : List[Any] = model.generate(**_a , **_a )
_a : Optional[Any] = '''Sam is a great name. It means "sun" in Gaelic.'''
_a : Dict = tokenizer.batch_decode(_a , **_a )
assert generated_txt[0].strip() == tgt_text
| 15 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase__ : Optional[datasets.Features] = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCAmelCase__ : Any = PandasConfig
def __lowercase ( self ) -> Any:
return datasets.DatasetInfo(features=self.config.features )
def __lowercase ( self , _a ) -> List[Any]:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_a : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
_a : Dict = data_files
if isinstance(_a , _a ):
_a : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : int = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
_a : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(_a , _a ):
_a : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : Any = [dl_manager.iter_files(_a ) for file in files]
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) )
return splits
def __lowercase ( self , _a ) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_a : Optional[Any] = table_cast(_a , self.config.features.arrow_schema )
return pa_table
def __lowercase ( self , _a ) -> List[str]:
for i, file in enumerate(itertools.chain.from_iterable(_a ) ):
with open(_a , '''rb''' ) as f:
_a : str = pa.Table.from_pandas(pd.read_pickle(_a ) )
yield i, self._cast_table(_a )
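    # A minimal sketch of the expected input (illustrative, file name assumed):
    #
    #   import pandas as pd
    #   pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")
    #
    # Each data file is a pickled DataFrame that is read back with
    # pd.read_pickle and converted to one pyarrow.Table per file.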
| 15 | 1 |
from __future__ import annotations
import time
a__ = list[tuple[int, int]]
a__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
a__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a , _a , _a , _a ) -> Any:
_a : Any = pos_x
_a : Union[str, Any] = pos_y
_a : Dict = (pos_y, pos_x)
_a : Union[str, Any] = goal_x
_a : Union[str, Any] = goal_y
_a : int = parent
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a ) -> int:
_a : str = Node(start[1] , start[0] , goal[1] , goal[0] , _a )
_a : List[str] = Node(goal[1] , goal[0] , goal[1] , goal[0] , _a )
_a : int = [self.start]
_a : List[Any] = False
def __lowercase ( self ) -> Path | None:
while self.node_queue:
_a : Dict = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_a : List[Any] = True
return self.retrace_path(_a )
_a : Optional[Any] = self.get_successors(_a )
for node in successors:
self.node_queue.append(_a )
if not self.reached:
return [self.start.pos]
return None
def __lowercase ( self , _a ) -> list[Node]:
_a : List[Any] = []
for action in delta:
_a : List[Any] = parent.pos_x + action[1]
_a : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_a , _a , self.target.pos_y , self.target.pos_x , _a ) )
return successors
def __lowercase ( self , _a ) -> Path:
_a : Dict = node
_a : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_a : List[Any] = current_node.parent
path.reverse()
return path
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a ) -> List[Any]:
_a : Optional[int] = BreadthFirstSearch(_a , _a )
_a : List[str] = BreadthFirstSearch(_a , _a )
_a : Tuple = False
def __lowercase ( self ) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_a : List[Any] = self.fwd_bfs.node_queue.pop(0 )
_a : Optional[int] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_a : Optional[Any] = True
return self.retrace_bidirectional_path(
_a , _a )
_a : List[Any] = current_bwd_node
_a : Dict = current_fwd_node
_a : int = {
self.fwd_bfs: self.fwd_bfs.get_successors(_a ),
self.bwd_bfs: self.bwd_bfs.get_successors(_a ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_a )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def __lowercase ( self , _a , _a ) -> Path:
_a : str = self.fwd_bfs.retrace_path(_a )
_a : Any = self.bwd_bfs.retrace_path(_a )
bwd_path.pop()
bwd_path.reverse()
_a : str = fwd_path + bwd_path
return path
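# Why two frontiers help (rough sketch, not from the original file): plain BFS
# touches on the order of b ** d nodes for branching factor b and solution
# depth d, while forward and backward searches that meet in the middle touch
# roughly 2 * b ** (d / 2) nodes, exponentially fewer for large d.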
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
a__ = (0, 0)
a__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a__ = time.time()
a__ = BreadthFirstSearch(init, goal)
a__ = bfs.search()
a__ = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
a__ = time.time()
a__ = BidirectionalBreadthFirstSearch(init, goal)
a__ = bd_bfs.search()
a__ = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 15 |
def __UpperCAmelCase ( __a : int ,__a : int ,__a : int ) -> int:
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
_a : List[Any] = _modexpt(__a ,exponent // 2 ,__a ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__a ,exponent - 1 ,__a )) % modulo_value
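# Worked example (illustrative): for (base=3, exponent=5, modulo_value=7) the
# odd branch peels off one factor and the even branch squares the half-power,
# giving 3 ** 5 % 7 == 243 % 7 == 5.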
def __UpperCAmelCase ( __a : int = 1_777 ,__a : int = 1_855 ,__a : int = 8 ) -> int:
"""simple docstring"""
_a : List[Any] = base
for _ in range(1 ,__a ):
_a : Any = _modexpt(__a ,__a ,10**digits )
return result
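# Sanity check (illustrative, not part of the original file): with base 3,
# number_of_exponentiations 3 and 8 digits, the tower is 3 ** (3 ** 3)
# = 3 ** 27 = 7625597484987, so the last 8 digits are 97484987.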
if __name__ == "__main__":
print(f'''{solution() = }''')
| 15 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
a__ = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def __UpperCAmelCase ( __a : str = "dhaka" ,__a : int = 5 ) -> int:
"""simple docstring"""
_a : Optional[Any] = min(__a ,50 ) # Prevent abuse!
_a : str = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
_a : Dict = requests.get('''https://www.google.com/search''' ,params=__a ,headers=__a )
_a : List[str] = BeautifulSoup(html.text ,'''html.parser''' )
_a : Union[str, Any] = ''''''.join(
re.findall(R'''AF_initDataCallback\(([^<]+)\);''' ,str(soup.select('''script''' ) ) ) )
_a : Optional[Any] = json.dumps(__a )
_a : Tuple = json.loads(__a )
_a : List[str] = re.findall(
R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' ,__a ,)
if not matched_google_image_data:
return 0
_a : Dict = re.sub(
R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' ,'''''' ,str(__a ) ,)
_a : List[str] = re.findall(
R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' ,__a ,)
for index, fixed_full_res_image in enumerate(__a ):
if index >= max_images:
return index
_a : List[Any] = bytes(__a ,'''ascii''' ).decode(
'''unicode-escape''' )
_a : Optional[Any] = bytes(__a ,'''ascii''' ).decode(
'''unicode-escape''' )
_a : Any = urllib.request.build_opener()
_a : List[str] = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(__a )
_a : Any = F"""query_{query.replace(' ' ,'_' )}"""
if not os.path.exists(__a ):
os.makedirs(__a )
urllib.request.urlretrieve( # noqa: S310
__a ,F"""{path_name}/original_size_img_{index}.jpg""" )
return index
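# Minimal usage sketch (query and count are illustrative):
#   saved = download_images_from_google_query("dhaka", 5)
#   # images land in ./query_dhaka/original_size_img_<index>.jpg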
if __name__ == "__main__":
try:
a__ = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('''Please provide a search term.''')
raise
| 15 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a__ = '''\
'''
a__ = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
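Concretely, for a tokenized sequence X = (x_1, ..., x_t) the score is
    PPL(X) = exp( -(1/t) * sum_{i=1}^{t} log p(x_i | x_{<i}) ),
so a lower perplexity means the model assigns higher probability to the text.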
'''
a__ = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def __lowercase ( self , _a , _a , _a = 1_6 , _a = True , _a=None ) -> List[Any]:
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
_a : List[str] = '''cuda'''
else:
_a : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
_a : Dict = AutoModelForCausalLM.from_pretrained(_a )
_a : List[Any] = model.to(_a )
_a : List[str] = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_a : str = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_a : List[Any] = model.config.max_length - 1
else:
_a : List[str] = model.config.max_length
_a : Union[str, Any] = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
_a : List[Any] = encodings['''input_ids''']
_a : int = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_a : Optional[int] = []
_a : Dict = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
_a : Dict = min(start_index + batch_size , len(_a ) )
_a : Union[str, Any] = encoded_texts[start_index:end_index]
_a : int = attn_masks[start_index:end_index]
if add_start_token:
_a : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
_a : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_a : Dict = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 )
_a : Dict = encoded_batch
with torch.no_grad():
_a : Any = model(_a , attention_mask=_a ).logits
_a : List[str] = out_logits[..., :-1, :].contiguous()
_a : Union[str, Any] = labels[..., 1:].contiguous()
_a : Optional[int] = attn_mask[..., 1:].contiguous()
            _a : Union[str, Any] = torch.exp2(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 15 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
a__ = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
a__ = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with open(__a ,'''r''' ,encoding='''utf-8''' ) as f:
_a : Dict = json.loads(f.read() )
_a : Tuple = collections.OrderedDict()
_a : List[str] = collections.OrderedDict()
_a : Any = collections.OrderedDict()
with open(__a ,'''r''' ,encoding='''utf-8''' ) as f:
_a : List[str] = f.readlines()
_a : Dict = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(__a ):
_a : List[str] = b
_a : Optional[Any] = idx
for wd in b:
_a : List[Any] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
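# Shape of the parsed files, as implied by the loader above (the Japanese
# example is illustrative): emoji.json maps surface forms to tokens and back;
# vocab.txt has one entry per line, where a line containing commas lists
# variants that share one token id (e.g. "ありがとう,有難う") and a bare ","
# line stands for the literal comma token.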
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[str] = ["input_ids", "attention_mask"]
def __init__( self , _a , _a , _a="<|endoftext|>" , _a="<|endoftext|>" , _a="<|startoftext|>" , _a="<|endoftext|>" , _a=False , **_a , ) -> Optional[Any]:
super().__init__(
unk_token=_a , pad_token=_a , bos_token=_a , eos_token=_a , do_clean_text=_a , **_a , )
if not os.path.isfile(_a ):
raise ValueError(
F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
''' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
if not os.path.isfile(_a ):
raise ValueError(
F"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
''' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
_a : Dict = do_clean_text
_a , _a , _a , _a : Any = load_vocab_and_emoji(_a , _a )
_a : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def __lowercase ( self ) -> Any:
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def __lowercase ( self ) -> List[Any]:
return dict(self.raw_vocab , **self.added_tokens_encoder )
def __lowercase ( self , _a ) -> Tuple:
return self.subword_tokenizer.tokenize(_a , clean=self.do_clean_text )
def __lowercase ( self , _a ) -> Any:
return self.vocab.get(_a , self.vocab.get(self.unk_token ) )
def __lowercase ( self , _a ) -> Dict:
return self.subword_tokenizer.convert_id_to_token(_a )
def __lowercase ( self , _a ) -> Dict:
_a : Optional[int] = ''''''.join(_a ).strip()
return out_string
def __lowercase ( self , _a ) -> List[int]:
_a : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_a , add_special_tokens=_a ) + [self.eos_token_id] )
if len(_a ) > self.model_max_length:
_a : Dict = input_ids[-self.model_max_length :]
return input_ids
def __lowercase ( self , _a , _a = None ) -> Tuple[str]:
_a : int = 0
if os.path.isdir(_a ):
_a : Any = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_a : Dict = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
else:
_a : str = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
)
_a : Optional[int] = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
)
with open(_a , '''w''' , encoding='''utf-8''' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
_a : Tuple = token_index
writer.write(''','''.join(_a ) + '''\n''' )
index += 1
with open(_a , '''w''' , encoding='''utf-8''' ) as writer:
json.dump(self.emoji , _a )
return vocab_file, emoji_file
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , _a , _a , _a ) -> Optional[Any]:
_a : Tuple = vocab # same as swe
_a : str = ids_to_tokens # same as bpe
_a : str = emoji
_a : List[str] = np.max([len(_a ) for w in self.vocab.keys()] )
_a : int = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
_a : Any = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
_a : Union[str, Any] = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
_a : Any = re.compile(
R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
_a : Union[str, Any] = re.compile(
R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
_a : int = re.compile(
R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
_a : Union[str, Any] = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
_a : Any = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
_a : str = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self ) -> Optional[Any]:
return len(self.ids_to_tokens )
def __lowercase ( self , _a ) -> int:
_a : Union[str, Any] = self.content_repattera.sub('''<URL>''' , _a )
_a : List[Any] = self.content_repattera.sub('''<EMAIL>''' , _a )
_a : Optional[Any] = self.content_repattera.sub('''<TEL>''' , _a )
_a : str = self.content_repattera.sub('''<DATE>''' , _a )
_a : Union[str, Any] = self.content_repattera.sub('''<DATE>''' , _a )
_a : Dict = self.content_repattera.sub('''<PRICE>''' , _a )
_a : Union[str, Any] = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_a : Union[str, Any] = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
return content
def __lowercase ( self , _a , _a=False ) -> Dict:
_a : List[str] = text.replace(''' ''' , '''<SP>''' )
_a : Any = text.replace(''' ''' , '''<SP>''' )
_a : Any = text.replace('''\r\n''' , '''<BR>''' )
_a : int = text.replace('''\n''' , '''<BR>''' )
_a : Optional[int] = text.replace('''\r''' , '''<BR>''' )
_a : Union[str, Any] = text.replace('''\t''' , '''<TAB>''' )
_a : Optional[Any] = text.replace('''—''' , '''ー''' )
_a : Tuple = text.replace('''−''' , '''ー''' )
for k, v in self.emoji["emoji"].items():
if k in text:
_a : Union[str, Any] = text.replace(_a , _a )
if clean:
_a : int = self.clean_text(_a )
def check_simbol(_a ):
_a : str = x.encode()
if len(_a ) == 1 and len(_a ) == 2:
_a : int = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc_2_a_1 and c <= 0xc_2_b_f)
or (c >= 0xc_7_8_0 and c <= 0xc_7_8_3)
or (c >= 0xc_a_b_9 and c <= 0xc_b_b_f)
or (c >= 0xc_c_8_0 and c <= 0xc_d_a_2)
):
return True
return False
def checkuae(_a ):
_a : Any = x.encode()
if len(_a ) == 1 and len(_a ) == 3:
_a : List[Any] = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe_2_8_0_8_0 and c <= 0xe_2_b_0_7_f:
return True
return False
_a : str = 0
_a : Optional[Any] = []
while pos < len(_a ):
_a : int = min(len(_a ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
_a : List[Any] = [] # (token_id, token, pos)
for e in range(_a , _a , -1 ):
_a : Tuple = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_a ) > 2:
_a : List[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_a ) > 0:
# the smallest token_id is adopted
_a , _a , _a : List[Any] = sorted(_a , key=lambda _a : x[0] )[0]
result.append(_a )
_a : Tuple = e
else:
_a : Optional[int] = pos + 1
_a : str = text[pos:end]
if check_simbol(_a ):
result.append('''<KIGOU>''' )
elif checkuae(_a ):
result.append('''<U2000U2BFF>''' )
else:
for i in wd.encode('''utf-8''' ):
result.append('''<|byte%d|>''' % i )
_a : str = end
return result
def __lowercase ( self , _a , _a="\n" ) -> List[str]:
_a : Optional[int] = []
_a : str = []
_a : int = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_a ) > 0:
words.append(bytearray(_a ).decode('''utf-8''' , errors='''replace''' ) )
_a : List[Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(_a )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(_a )
if len(_a ) > 0:
words.append(bytearray(_a ).decode('''utf-8''' , errors='''replace''' ) )
_a : str = ''''''.join(_a )
return text
| 15 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
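    # In the upstream transformers boilerplate this assignment reads
    # `sys.modules[__name__] = _LazyModule(...)`, so the torch-dependent
    # imports above are deferred until an attribute is first accessed.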
| 15 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = KandinskyVaaPriorPipeline
UpperCAmelCase__ : List[Any] = ["prompt"]
UpperCAmelCase__ : List[str] = ["prompt", "negative_prompt"]
UpperCAmelCase__ : List[str] = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Union[str, Any] = False
@property
def __lowercase ( self ) -> Optional[int]:
return 3_2
@property
def __lowercase ( self ) -> Optional[Any]:
return 3_2
@property
def __lowercase ( self ) -> Union[str, Any]:
return self.time_input_dim
@property
def __lowercase ( self ) -> Optional[int]:
return self.time_input_dim * 4
@property
def __lowercase ( self ) -> Union[str, Any]:
return 1_0_0
@property
def __lowercase ( self ) -> str:
_a : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowercase ( self ) -> Any:
torch.manual_seed(0 )
_a : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(_a )
@property
def __lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : Union[str, Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_2,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
_a : Dict = PriorTransformer(**_a )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_a : str = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __lowercase ( self ) -> int:
torch.manual_seed(0 )
_a : str = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
_a : Optional[Any] = CLIPVisionModelWithProjection(_a )
return model
@property
def __lowercase ( self ) -> Optional[Any]:
_a : str = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=_a , do_normalize=_a , do_resize=_a , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def __lowercase ( self ) -> Dict:
_a : int = self.dummy_prior
_a : int = self.dummy_image_encoder
_a : int = self.dummy_text_encoder
_a : Optional[int] = self.dummy_tokenizer
_a : Any = self.dummy_image_processor
_a : Union[str, Any] = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_0_0_0 , clip_sample=_a , clip_sample_range=10.0 , )
_a : Tuple = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
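    # The dummy components wire up the same stages as the full Kandinsky 2.2
    # prior: tokenizer -> text_encoder and image_processor -> image_encoder
    # produce CLIP embeddings, and PriorTransformer + UnCLIPScheduler map the
    # text embedding to an image embedding.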
def __lowercase ( self , _a , _a=0 ) -> Tuple:
if str(_a ).startswith('''mps''' ):
_a : Dict = torch.manual_seed(_a )
else:
_a : List[Any] = torch.Generator(device=_a ).manual_seed(_a )
_a : Tuple = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : List[str] = '''cpu'''
_a : Tuple = self.get_dummy_components()
_a : Tuple = self.pipeline_class(**_a )
_a : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Optional[Any] = pipe(**self.get_dummy_inputs(_a ) )
_a : Any = output.image_embeds
_a : Optional[int] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_a : Dict = image[0, -1_0:]
_a : Dict = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
_a : str = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __lowercase ( self ) -> int:
_a : Union[str, Any] = torch_device == '''cpu'''
_a : Dict = True
_a : Any = False
self._test_inference_batch_single_identical(
test_max_difference=_a , relax_max_difference=_a , test_mean_pixel_difference=_a , )
@skip_mps
def __lowercase ( self ) -> Union[str, Any]:
_a : List[Any] = torch_device == '''cpu'''
_a : Any = False
self._test_attention_slicing_forward_pass(
test_max_difference=_a , test_mean_pixel_difference=_a , )
| 15 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
a__ = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
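# The structure above is the validation schema: each node names a section
# heading, allow_empty / allow_empty_text say whether its body may be blank,
# and subsections lists the required children (null marks a leaf).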
a__ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
a__ = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
a__ = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
a__ = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
a__ = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
a__ = ''''''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]:
"""simple docstring"""
assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    """Validating a malformed README string raises the expected error."""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='''root'''))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    """Parsing a structurally broken README string raises the expected error."""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='''root'''))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    """Parsing errors can be suppressed instead of raised."""
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_readme_correct(readme_md, expected_dict):
    """A well-formed README file on disk parses into the expected structure."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''') as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    """Validating a malformed README file raises the expected error."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    """Parsing a structurally broken README file raises the expected error."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    """Parsing errors on a README file can be suppressed instead of raised."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 15 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """Build the argument parser for the `accelerate test` command."""
    if subparsers is not None:
        parser = subparsers.add_parser('''test''')
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''')
    parser.add_argument(
        '''--config_file''', default=None, help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ))
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    """Run the bundled test script through `accelerate-launch` and report success."""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"""--config_file={args.config_file} {script_name}"""
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''')
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
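# Typical invocations of this command once `accelerate config` has been run
# (the config file path below is illustrative):
#
#   accelerate test
#   accelerate test --config_file path/to/config.yaml
#
# A zero return code from the launched test script triggers the success message above.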
| 15 |
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError('''List is empty''')
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=9_9 , _a=1_6 , _a=3_6 , _a=6 , _a=6 , _a=6 , _a=3_7 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=1_6 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> Optional[Any]:
_a : Union[str, Any] = parent
_a : int = batch_size
_a : Dict = seq_length
_a : Union[str, Any] = is_training
_a : Union[str, Any] = use_input_mask
_a : int = use_token_type_ids
_a : str = use_labels
_a : int = vocab_size
_a : Optional[Any] = embedding_size
_a : str = hidden_size
_a : Optional[int] = num_hidden_layers
_a : str = num_hidden_groups
_a : List[Any] = num_attention_heads
_a : List[Any] = intermediate_size
_a : List[str] = hidden_act
_a : int = hidden_dropout_prob
_a : Optional[int] = attention_probs_dropout_prob
_a : List[str] = max_position_embeddings
_a : List[Any] = type_vocab_size
_a : List[str] = type_sequence_label_size
_a : Optional[Any] = initializer_range
_a : Tuple = num_labels
_a : str = num_choices
_a : Tuple = scope
def __lowercase ( self ) -> Optional[Any]:
_a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Optional[Any] = None
if self.use_input_mask:
_a : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_a : List[Any] = None
if self.use_token_type_ids:
_a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : List[str] = None
_a : Tuple = None
_a : Any = None
if self.use_labels:
_a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_a : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self ) -> Tuple:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
_a : Dict = AlbertModel(config=_a )
model.to(_a )
model.eval()
_a : List[Any] = model(_a , attention_mask=_a , token_type_ids=_a )
_a : Union[str, Any] = model(_a , token_type_ids=_a )
_a : str = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]:
_a : Any = AlbertForPreTraining(config=_a )
model.to(_a )
model.eval()
_a : Dict = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , sentence_order_label=_a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple:
_a : Dict = AlbertForMaskedLM(config=_a )
model.to(_a )
model.eval()
_a : Tuple = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
_a : Optional[Any] = AlbertForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_a : Tuple = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> str:
_a : Tuple = self.num_labels
_a : Dict = AlbertForSequenceClassification(_a )
model.to(_a )
model.eval()
_a : List[Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]:
_a : Optional[int] = self.num_labels
_a : Union[str, Any] = AlbertForTokenClassification(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
_a : Dict = self.num_choices
_a : int = AlbertForMultipleChoice(config=_a )
model.to(_a )
model.eval()
_a : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Tuple = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowercase ( self ) -> Union[str, Any]:
_a : List[str] = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) : Optional[int] = config_and_inputs
_a : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Dict = True
def __lowercase ( self , _a , _a , _a=False ) -> List[str]:
_a : int = super()._prepare_for_class(_a , _a , return_labels=_a )
if return_labels:
if model_class in get_values(_a ):
_a : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_a )
_a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_a )
return inputs_dict
def __lowercase ( self ) -> str:
_a : Tuple = AlbertModelTester(self )
_a : int = ConfigTester(self , config_class=_a , hidden_size=3_7 )
def __lowercase ( self ) -> Dict:
self.config_tester.run_common_tests()
def __lowercase ( self ) -> Any:
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> str:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
def __lowercase ( self ) -> int:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def __lowercase ( self ) -> Union[str, Any]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def __lowercase ( self ) -> Any:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a : Any = type
self.model_tester.create_and_check_model(*_a )
@slow
def __lowercase ( self ) -> Dict:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : List[str] = AlbertModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self ) -> int:
_a : Optional[int] = AlbertModel.from_pretrained('''albert-base-v2''' )
_a : int = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_a : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_a : List[str] = model(_a , attention_mask=_a )[0]
_a : List[str] = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , _a )
_a : List[Any] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1e-4 ) )
| 15 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']
OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """Rename the LM-head weight key in a DialoGPT checkpoint and save it in Hugging Face format."""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
        pytorch_dump_folder_path = f'''./DialoGPT-{MODEL}'''
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
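# The conversion is a pure key rename: a checkpoint dict such as
# {"lm_head.decoder.weight": W, ...} is re-saved as {"lm_head.weight": W, ...}
# under the standard Hugging Face weights filename (WEIGHTS_NAME), which is the
# key the GPT-2-style loading code expects.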
| 15 | 1 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self , _a , _a ) -> List[Any]:
return F"""gaussian_noise_s={seed}_shape={'_'.join([str(_a ) for s in shape] )}.npy"""
def __lowercase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __lowercase ( self , _a=0 , _a=(4, 4, 6_4, 6_4) , _a=False ) -> str:
_a : int = jnp.bfloataa if fpaa else jnp.floataa
_a : Any = jnp.array(load_hf_numpy(self.get_file_format(_a , _a ) ) , dtype=_a )
return image
def __lowercase ( self , _a=False , _a="CompVis/stable-diffusion-v1-4" ) -> Optional[int]:
_a : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
_a : Any = '''bf16''' if fpaa else None
_a , _a : Dict = FlaxUNetaDConditionModel.from_pretrained(
_a , subfolder='''unet''' , dtype=_a , revision=_a )
return model, params
def __lowercase ( self , _a=0 , _a=(4, 7_7, 7_6_8) , _a=False ) -> int:
_a : List[Any] = jnp.bfloataa if fpaa else jnp.floataa
_a : Tuple = jnp.array(load_hf_numpy(self.get_file_format(_a , _a ) ) , dtype=_a )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def __lowercase ( self , _a , _a , _a ) -> List[Any]:
_a , _a : List[str] = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_a )
_a : Optional[Any] = self.get_latents(_a , fpaa=_a )
_a : Any = self.get_encoder_hidden_states(_a , fpaa=_a )
_a : Optional[Any] = model.apply(
{'''params''': params} , _a , jnp.array(_a , dtype=jnp.intaa ) , encoder_hidden_states=_a , ).sample
assert sample.shape == latents.shape
_a : Any = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_a : Dict = jnp.array(_a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(_a , _a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def __lowercase ( self , _a , _a , _a ) -> Any:
_a , _a : Any = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_a )
_a : int = self.get_latents(_a , shape=(4, 4, 9_6, 9_6) , fpaa=_a )
_a : Tuple = self.get_encoder_hidden_states(_a , shape=(4, 7_7, 1_0_2_4) , fpaa=_a )
_a : int = model.apply(
{'''params''': params} , _a , jnp.array(_a , dtype=jnp.intaa ) , encoder_hidden_states=_a , ).sample
assert sample.shape == latents.shape
_a : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_a : int = jnp.array(_a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(_a , _a , atol=1e-2 )
| 15 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ ( enum.Enum ):
"""simple docstring"""
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Optional[Any] = 2
@add_end_docstrings(__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self , *_a , **_a ) -> List[str]:
super().__init__(*_a , **_a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_a : Dict = None
if self.model.config.prefix is not None:
_a : List[Any] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_a : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params )
_a : Optional[Any] = {**self._preprocess_params, **preprocess_params}
_a : List[Any] = {**self._forward_params, **forward_params}
def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]:
_a : List[Any] = {}
if prefix is not None:
_a : Optional[Any] = prefix
if prefix:
_a : Dict = self.tokenizer(
_a , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Tuple = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_a : Dict = handle_long_generation
preprocess_params.update(_a )
_a : Tuple = generate_kwargs
_a : Any = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_a : Any = ReturnType.TENSORS
if return_type is not None:
_a : Any = return_type
if clean_up_tokenization_spaces is not None:
_a : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a )
if len(_a ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_a : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowercase ( self , *_a , **_a ) -> Union[str, Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self , _a , **_a ) -> List[str]:
return super().__call__(_a , **_a )
def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]:
_a : Optional[int] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
_a : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a : int = generate_kwargs['''max_new_tokens''']
else:
_a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
_a : List[Any] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def __lowercase ( self , _a , **_a ) -> Optional[int]:
_a : Any = model_inputs['''input_ids''']
_a : Optional[Any] = model_inputs.get('''attention_mask''' , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
_a : int = None
_a : int = None
_a : List[str] = 1
else:
_a : List[Any] = input_ids.shape[0]
_a : Union[str, Any] = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_a : int = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_a : Tuple = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_a : Dict = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a )
_a : int = generated_sequence.shape[0]
if self.framework == "pt":
_a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int:
_a : Tuple = model_outputs['''generated_sequence'''][0]
_a : int = model_outputs['''input_ids''']
_a : Any = model_outputs['''prompt_text''']
_a : Any = generated_sequence.numpy().tolist()
_a : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_a : Optional[int] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_a : str = self.tokenizer.decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_a : Union[str, Any] = 0
else:
_a : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) )
if return_type == ReturnType.FULL_TEXT:
_a : str = prompt_text + text[prompt_length:]
else:
_a : List[str] = text[prompt_length:]
_a : Union[str, Any] = {'''generated_text''': all_text}
records.append(_a )
return records
| 15 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus everything destined for the training script."""
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch '''
            '''helper utility that will spawn up '''
            '''multiple distributed processes'''
        ))
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''', type=int, default=1, help='''Number of TPU cores to use (1 or 8).''')
    # positional
    parser.add_argument(
        '''training_script''', type=str, help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ))
    # rest from the training program
    parser.add_argument('''training_script_args''', nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the spawned processes see the training script's own arguments.
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
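# Typical invocation (script name and training flags are illustrative):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The wrapped script must define an `_mp_fn(index)` entry point, since that is
# what gets handed to xmp.spawn above.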
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """Build the argument parser for the `accelerate test` command."""
    if subparsers is not None:
        parser = subparsers.add_parser('''test''')
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''')
    parser.add_argument(
        '''--config_file''', default=None, help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ))
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    """Run the bundled test script through `accelerate-launch` and report success."""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"""--config_file={args.config_file} {script_name}"""
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''')
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 15 | 1 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
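# Worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15) == 26.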
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 15 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : Any = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_a : str = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __lowercase ( self , **_a ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> str:
_a : List[str] = self.get_tokenizer()
_a : Tuple = self.get_image_processor()
_a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Dict:
_a : List[str] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Dict = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = self.prepare_image_inputs()
_a : List[Any] = image_processor(_a , return_tensors='''np''' )
_a : Dict = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Tuple = '''lower newer'''
_a : int = processor(text=_a )
_a : str = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> List[Any]:
_a : Any = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Union[str, Any] = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : int = processor.batch_decode(_a )
_a : int = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Tuple = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Optional[int] = '''lower newer'''
_a : Dict = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 15 | 1 |
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError('''List is empty''')
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
_a : Optional[Any] = getattr(__a ,__a )
if weight_type is not None:
_a : Dict = getattr(__a ,__a ).shape
else:
_a : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : List[Any] = value
elif weight_type == "weight_g":
_a : Any = value
elif weight_type == "weight_v":
_a : Union[str, Any] = value
elif weight_type == "bias":
_a : Optional[int] = value
else:
_a : List[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = []
_a : Union[str, Any] = fairseq_model.state_dict()
_a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a : int = False
if "conv_layers" in name:
load_conv_layer(
__a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,)
_a : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
_a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
_a : Any = True
if "*" in mapped_key:
_a : Optional[int] = name.split(__a )[0].split('''.''' )[-2]
_a : Any = mapped_key.replace('''*''' ,__a )
if "weight_g" in name:
_a : List[Any] = '''weight_g'''
elif "weight_v" in name:
_a : List[str] = '''weight_v'''
elif "weight" in name:
_a : Any = '''weight'''
elif "bias" in name:
_a : str = '''bias'''
else:
_a : Any = None
set_recursively(__a ,__a ,__a ,__a ,__a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple:
"""simple docstring"""
_a : int = full_name.split('''conv_layers.''' )[-1]
_a : Any = name.split('''.''' )
_a : List[Any] = int(items[0] )
_a : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_a : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
_a : Tuple = HubertConfig.from_pretrained(__a )
else:
_a : Any = HubertConfig()
if is_finetuned:
if dict_path:
_a : Tuple = Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a : Any = target_dict.pad_index
_a : Tuple = target_dict.bos_index
_a : Optional[int] = target_dict.eos_index
_a : Optional[Any] = len(target_dict.symbols )
_a : Tuple = os.path.join(__a ,'''vocab.json''' )
if not os.path.isdir(__a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) )
return
os.makedirs(__a ,exist_ok=__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices ,__a )
_a : Tuple = WavaVecaCTCTokenizer(
__a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,)
_a : Tuple = True if config.feat_extract_norm == '''layer''' else False
_a : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,)
_a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a )
processor.save_pretrained(__a )
_a : Tuple = HubertForCTC(__a )
else:
_a : Tuple = HubertModel(__a )
if is_finetuned:
_a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a : Any = model[0].eval()
recursively_load_weights(__a ,__a ,__a )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 15 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    """Configuration for the multilingual M-CLIP text encoder."""
    model_type = "M-CLIP"
    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class MultilingualCLIP(PreTrainedModel):
    """XLM-R text encoder plus a linear projection into the CLIP embedding space."""
    config_class = MCLIPConfig
    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)
    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool the token embeddings, ignoring padding via the attention mask.
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
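# Minimal usage sketch; the checkpoint name is illustrative -- any checkpoint
# saved for this class with an MCLIPConfig should work:
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   batch = tok(["a photo of a cat", "une photo d'un chat"], padding=True, return_tensors="pt")
#   projected, token_embs = model(batch["input_ids"], batch["attention_mask"])
#
# `projected` holds one pooled, linearly projected embedding per sentence;
# `token_embs` is the raw per-token transformer output.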
| 15 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    """Wraps a ViLT image processor and a BERT tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', FutureWarning, )
        return self.image_processor
| 15 | 1 |
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F"""0o{int(octal)}"""
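# e.g. 65 = 1*8**2 + 0*8**1 + 1*8**0, so decimal_to_octal(65) returns "0o101".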
def main() -> None:
    """Print a handful of decimal-to-octal conversions."""
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(216 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(512 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
| 15 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number spiral (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
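# Why `4 * odd**2 - 6 * even`: ring i of the spiral has side length odd = 2*i + 1,
# its largest corner is odd**2, and the other three corners are odd**2 - even,
# odd**2 - 2*even and odd**2 - 3*even (with even = 2*i), so the four corners sum
# to 4 * odd**2 - 6 * even.  For a 5x5 spiral, solution(5) == 101 =
# 1 + (3 + 5 + 7 + 9) + (13 + 17 + 21 + 25).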
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 15 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """Build a `getitem` operation for key `k`."""
    return getitem, k
def _set(k, v):
    """Build a `setitem` operation for key `k` and value `v`."""
    return setitem, k, v
def _del(k):
    """Build a `delitem` operation for key `k`."""
    return delitem, k
def _run_operation(obj, fun, *args):
    """Apply `fun` to `obj`, returning (result, None) on success or (None, error) on failure."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
_overwrite_items = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
_delete_items = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
_access_absent_items = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
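# The fixtures that insert five items are meant to exercise resizing: the test
# below builds the HashMap with `initial_block_size=4`, so five insertions are
# guaranteed to grow the table, and the follow-up deletions shrink it again.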
@pytest.mark.parametrize(
'''operations''' ,(
pytest.param(_add_items ,id='''add items''' ),
pytest.param(_overwrite_items ,id='''overwrite items''' ),
pytest.param(_delete_items ,id='''delete items''' ),
pytest.param(_access_absent_items ,id='''access absent items''' ),
pytest.param(_add_with_resize_up ,id='''add with resize up''' ),
pytest.param(_add_with_resize_down ,id='''add with resize down''' ),
) ,)
def test_hash_map_is_the_same_as_dict(operations):
    """Replay the same operations on a HashMap and a dict and require identical behaviour."""
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added():
    """HashMap's public API should not grow beyond dict's."""
    def is_public(name: str) -> bool:
        return not name.startswith('''_''')
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
| 15 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
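# normalize_box rescales pixel boxes to the 0-1000 grid LayoutLM-style models
# expect; e.g. on a 300x600 image the box [15, 30, 45, 60] (left, top, right,
# bottom) becomes [50, 50, 150, 100].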
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
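# Standalone sketch of the 0-1000 box normalization used by the processor above;
# the function name and sample box are illustrative.
def normalize_box_demo(box, width, height):
    # Scale pixel coordinates into the model's fixed 0-1000 coordinate space.
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
assert normalize_box_demo([10, 20, 110, 220], width=200, height=400) == [50, 50, 550, 550]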
| 15 | 1 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCAmelCase__ : torch.Tensor # [batch_size x 3]
UpperCAmelCase__ : torch.Tensor # [batch_size x 3]
UpperCAmelCase__ : torch.Tensor # [batch_size x 3]
UpperCAmelCase__ : torch.Tensor # [batch_size x 3]
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float
UpperCAmelCase__ : float
UpperCAmelCase__ : Tuple[int]
def __lowercase ( self ) -> Any:
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __lowercase ( self ) -> Tuple:
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __lowercase ( self ) -> List[Any]:
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __lowercase ( self ) -> torch.Tensor:
_a : List[str] = torch.arange(self.height * self.width )
_a : Dict = torch.stack(
[
pixel_indices % self.width,
torch.div(_a , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def __lowercase ( self ) -> Optional[Any]:
_a , *_a : Optional[Any] = self.shape
_a : str = int(np.prod(_a ) )
_a : Dict = self.get_image_coords()
_a : int = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_a : int = self.get_camera_rays(_a )
_a : Tuple = rays.view(_a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __lowercase ( self , _a ) -> torch.Tensor:
_a , *_a , _a : int = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_a : int = coords.view(_a , -1 , 2 )
_a : Any = self.resolution()
_a : List[Any] = self.fov()
_a : str = (flat.float() / (res - 1)) * 2 - 1
_a : List[Any] = fracs * torch.tan(fov / 2 )
_a : Any = fracs.view(_a , -1 , 2 )
_a : Dict = (
self.z.view(_a , 1 , 3 )
+ self.x.view(_a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_a , 1 , 3 ) * fracs[:, :, 1:]
)
_a : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=_a )
_a : List[Any] = torch.stack(
[
torch.broadcast_to(self.origin.view(_a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_a , *_a , 2 , 3 )
def __lowercase ( self , _a , _a ) -> "DifferentiableProjectiveCamera":
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_a , height=_a , x_fov=self.x_fov , y_fov=self.y_fov , )
def __UpperCAmelCase ( __a : int ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
_a : List[Any] = []
_a : str = []
_a : str = []
_a : Tuple = []
for theta in np.linspace(0 ,2 * np.pi ,num=20 ):
_a : Dict = np.array([np.sin(__a ), np.cos(__a ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_a : List[str] = -z * 4
_a : Dict = np.array([np.cos(__a ), -np.sin(__a ), 0.0] )
_a : Union[str, Any] = np.cross(__a ,__a )
origins.append(__a )
xs.append(__a )
ys.append(__a )
zs.append(__a )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__a ,axis=0 ) ).float() ,x=torch.from_numpy(np.stack(__a ,axis=0 ) ).float() ,y=torch.from_numpy(np.stack(__a ,axis=0 ) ).float() ,z=torch.from_numpy(np.stack(__a ,axis=0 ) ).float() ,width=__a ,height=__a ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(__a )) ,)
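# Standalone sketch of the pixel-coordinate grid built by the camera above:
# flat index % width gives x, truncated division by width gives y (row-major scan).
import torch as _torch
_idx = _torch.arange(6)
_coords = _torch.stack([_idx % 3, _torch.div(_idx, 3, rounding_mode="trunc")], dim=1)
assert _coords.tolist() == [[0, 0], [1, 0], [2, 0], [0, 1], [1, 1], [2, 1]]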
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : int = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=__a )
_a : Optional[int] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__a )
env_command_parser(subparsers=__a )
launch_command_parser(subparsers=__a )
tpu_command_parser(subparsers=__a )
test_command_parser(subparsers=__a )
# Let's go
_a : Dict = parser.parse_args()
if not hasattr(__a ,'''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__a )
if __name__ == "__main__":
main()
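# Standalone sketch of the subcommand dispatch pattern used above; the "greet"
# command is hypothetical, not part of the accelerate CLI.
from argparse import ArgumentParser as _ArgumentParser
_parser = _ArgumentParser("demo", usage="demo <command> [<args>]")
_sub = _parser.add_subparsers(help="demo command helpers")
_greet = _sub.add_parser("greet")
_greet.add_argument("name")
_greet.set_defaults(func=lambda a: print(f"hello {a.name}"))
_args = _parser.parse_args(["greet", "world"])
if not hasattr(_args, "func"):
    _parser.print_help()
else:
    _args.func(_args)  # -> hello world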
| 15 | 1 |
a__ = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_a : str = input('''Enter message: ''' )
_a : int = input('''Enter key [alphanumeric]: ''' )
_a : List[Any] = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
_a : List[str] = '''encrypt'''
_a : Optional[Any] = encrypt_message(__a ,__a )
elif mode.lower().startswith('''d''' ):
_a : Dict = '''decrypt'''
_a : Tuple = decrypt_message(__a ,__a )
print(F"""\n{mode.title()}ed message:""" )
print(__a )
def __UpperCAmelCase ( __a : str ,__a : str ) -> str:
"""simple docstring"""
return translate_message(__a ,__a ,'''encrypt''' )
def __UpperCAmelCase ( __a : str ,__a : str ) -> str:
"""simple docstring"""
return translate_message(__a ,__a ,'''decrypt''' )
def __UpperCAmelCase ( __a : str ,__a : str ,__a : str ) -> str:
"""simple docstring"""
_a : Dict = []
_a : Optional[Any] = 0
_a : Any = key.upper()
for symbol in message:
_a : List[str] = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__a )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__a ):
_a : int = 0
else:
translated.append(__a )
return "".join(__a )
if __name__ == "__main__":
main()
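# Self-contained sketch of the Vigenère shift implemented above; as in
# translate_message, the key index only advances on letters, and non-letters
# pass through unchanged. Names here are illustrative.
_ALPHA = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _vigenere(text: str, key: str, sign: int) -> str:
    out, k = [], 0
    for ch in text:
        i = _ALPHA.find(ch.upper())
        if i == -1:
            out.append(ch)
            continue
        i = (i + sign * _ALPHA.find(key[k % len(key)].upper())) % len(_ALPHA)
        out.append(_ALPHA[i] if ch.isupper() else _ALPHA[i].lower())
        k += 1
    return "".join(out)
# Encrypt (sign=+1) then decrypt (sign=-1) round-trips the message:
assert _vigenere(_vigenere("Attack at dawn", "LEMON", 1), "LEMON", -1) == "Attack at dawn"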
| 15 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__ = random.Random()
def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any:
"""simple docstring"""
if rng is None:
_a : Dict = global_rng
_a : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : str = batch_size
_a : List[str] = min_seq_length
_a : str = max_seq_length
_a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] = spectrogram_length
_a : List[str] = feature_size
_a : List[Any] = num_audio_channels
_a : Tuple = hop_length
_a : Optional[int] = chunk_length
_a : int = sampling_rate
def __lowercase ( self ) -> Union[str, Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = TvltFeatureExtractor
def __lowercase ( self ) -> Dict:
_a : List[str] = TvltFeatureExtractionTester(self )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_a : Dict = self.feature_extraction_class.from_pretrained(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Union[str, Any] = feat_extract_second.to_dict()
_a : Any = dict_first.pop('''mel_filters''' )
_a : int = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
_a : List[str] = self.feature_extraction_class.from_json_file(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Dict = feat_extract_second.to_dict()
_a : str = dict_first.pop('''mel_filters''' )
_a : str = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize feature_extractor
_a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a : Union[str, Any] = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : int = np.asarray(_a )
_a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowercase ( self ) -> int:
_a : Union[str, Any] = self._load_datasamples(1 )
_a : int = TvltFeatureExtractor()
_a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
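# Standalone sketch of the ragged test-input generation used above, with a
# seeded RNG so the lengths (800, 1000, 1200) and values are reproducible.
import random as _random
_rng = _random.Random(0)
_speech_inputs = [[_rng.random() for _ in range(n)] for n in range(8_0_0, 1_4_0_0, 2_0_0)]
assert [len(x) for x in _speech_inputs] == [8_0_0, 1_0_0_0, 1_2_0_0]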
| 15 | 1 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
a__ = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : tuple ,__a : Path ,__a : Optional[Any] ,__a : Any ,__a : List[str] ,__a : int ,__a : Optional[Any]=False ,) -> str:
"""simple docstring"""
output_path.parent.mkdir(parents=__a ,exist_ok=__a )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__a ,__a ,f=output_path.as_posix() ,input_names=__a ,output_names=__a ,dynamic_axes=__a ,do_constant_folding=__a ,use_external_data_format=__a ,enable_onnx_checker=__a ,opset_version=__a ,)
else:
export(
__a ,__a ,f=output_path.as_posix() ,input_names=__a ,output_names=__a ,dynamic_axes=__a ,do_constant_folding=__a ,opset_version=__a ,)
@torch.no_grad()
def __UpperCAmelCase ( __a : str ,__a : str ,__a : int ,__a : bool = False ) -> Tuple:
"""simple docstring"""
_a : List[Any] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
_a : Dict = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
_a : List[str] = '''cpu'''
_a : Any = Path(__a )
# VAE DECODER
_a : List[str] = AutoencoderKL.from_pretrained(model_path + '''/vae''' )
_a : List[str] = vae_decoder.config.latent_channels
# forward only through the decoder part
_a : Union[str, Any] = vae_decoder.decode
onnx_export(
__a ,model_args=(
torch.randn(1 ,__a ,25 ,25 ).to(device=__a ,dtype=__a ),
False,
) ,output_path=output_path / '''vae_decoder''' / '''model.onnx''' ,ordered_input_names=['''latent_sample''', '''return_dict'''] ,output_names=['''sample'''] ,dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} ,opset=__a ,)
del vae_decoder
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
a__ = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''')
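# Minimal sketch of the torch.onnx.export call pattern above, on a hypothetical
# tiny module instead of the real VAE decoder; writes tiny_decoder.onnx to cwd.
import torch as _torch
class _TinyDecoder(_torch.nn.Module):
    def forward(self, latent_sample):
        return latent_sample * 2
_torch.onnx.export(
    _TinyDecoder(),
    (_torch.randn(1, 4, 25, 25),),
    "tiny_decoder.onnx",
    input_names=["latent_sample"],
    output_names=["sample"],
    dynamic_axes={"latent_sample": {0: "batch", 2: "height", 3: "width"}},
    opset_version=14,
)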
| 15 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
@add_end_docstrings(
__lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self , _a ) -> np.ndarray:
if self.framework == "tf":
_a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __lowercase ( self , _a ) -> np.ndarray:
_a : int = self.get_masked_index(_a )
_a : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __lowercase ( self , _a ) -> Optional[int]:
if isinstance(_a , _a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]:
if return_tensors is None:
_a : Union[str, Any] = self.framework
_a : str = self.tokenizer(_a , return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[str] = self.model(**_a )
_a : Any = model_inputs['''input_ids''']
return model_outputs
def __lowercase ( self , _a , _a=5 , _a=None ) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_a : List[Any] = target_ids.shape[0]
_a : Any = model_outputs['''input_ids'''][0]
_a : List[str] = model_outputs['''logits''']
if self.framework == "tf":
_a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_a : List[str] = outputs.numpy()
_a : Dict = outputs[0, masked_index, :]
_a : str = stable_softmax(_a , axis=-1 )
if target_ids is not None:
_a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) )
_a : Union[str, Any] = tf.expand_dims(_a , 0 )
_a : Optional[int] = tf.math.top_k(_a , k=_a )
_a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
_a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_a : List[str] = outputs[0, masked_index, :]
_a : List[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_a : List[Any] = probs[..., target_ids]
_a , _a : Optional[Any] = probs.topk(_a )
_a : Dict = []
_a : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_a : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_a : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
_a : Tuple = target_ids[p].tolist()
_a : List[str] = p
# Filter padding out:
_a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a )
_a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
def __lowercase ( self , _a , _a=None ) -> Dict:
if isinstance(_a , _a ):
_a : Tuple = [targets]
try:
_a : int = self.tokenizer.get_vocab()
except Exception:
_a : Any = {}
_a : List[Any] = []
for target in targets:
_a : List[Any] = vocab.get(_a , _a )
if id_ is None:
_a : Tuple = self.tokenizer(
_a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids''']
if len(_a ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
_a : Tuple = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
_a : List[str] = list(set(_a ) )
if len(_a ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
_a : int = np.array(_a )
return target_ids
def __lowercase ( self , _a=None , _a=None ) -> Tuple:
_a : str = {}
if targets is not None:
_a : List[Any] = self.get_target_ids(_a , _a )
_a : Optional[Any] = target_ids
if top_k is not None:
_a : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _a , *_a , **_a ) -> int:
_a : Optional[Any] = super().__call__(_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
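# Usage sketch for the fill-mask task implemented above; the checkpoint name is
# illustrative (any masked-LM checkpoint works) and weights are downloaded on first use.
if __name__ == "__main__":
    from transformers import pipeline
    _unmasker = pipeline("fill-mask", model="distilroberta-base")
    print(_unmasker("Paris is the <mask> of France.", top_k=2))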
| 15 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __UpperCAmelCase ( __a : Any ,__a : Optional[int] ,__a : str=1E-12 ) -> Dict:
"""simple docstring"""
_a : Any = jnp.divide(emb_a.T ,jnp.clip(jnp.linalg.norm(__a ,axis=1 ) ,a_min=__a ) ).T
_a : Optional[Any] = jnp.divide(emb_a.T ,jnp.clip(jnp.linalg.norm(__a ,axis=1 ) ,a_min=__a ) ).T
return jnp.matmul(__a ,norm_emb_a.T )
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
UpperCAmelCase__ : CLIPConfig
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def __lowercase ( self ) -> List[Any]:
_a : List[Any] = FlaxCLIPVisionModule(self.config.vision_config )
_a : Tuple = nn.Dense(self.config.projection_dim , use_bias=_a , dtype=self.dtype )
_a : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) )
_a : Optional[int] = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
_a : Dict = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (1_7,) )
_a : int = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
def __call__( self , _a ) -> Union[str, Any]:
_a : str = self.vision_model(_a )[1]
_a : Optional[Any] = self.visual_projection(_a )
_a : Tuple = jax_cosine_distance(_a , self.special_care_embeds )
_a : Optional[int] = jax_cosine_distance(_a , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
_a : Union[str, Any] = 0.0
_a : Optional[int] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
_a : Dict = jnp.round(_a , 3 )
_a : Optional[int] = jnp.any(special_scores > 0 , axis=1 , keepdims=_a )
# Use a lower threshold if an image has any special care concept
_a : Union[str, Any] = is_special_care * 0.01
_a : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
_a : Optional[int] = jnp.round(_a , 3 )
_a : int = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = CLIPConfig
UpperCAmelCase__ : Tuple = "clip_input"
UpperCAmelCase__ : Optional[int] = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , _a , _a = None , _a = 0 , _a = jnp.floataa , _a = True , **_a , ) -> Optional[Any]:
if input_shape is None:
_a : Optional[Any] = (1, 2_2_4, 2_2_4, 3)
_a : int = self.module_class(config=_a , dtype=_a , **_a )
super().__init__(_a , _a , input_shape=_a , seed=_a , dtype=_a , _do_init=_do_init )
def __lowercase ( self , _a , _a , _a = None ) -> FrozenDict:
# init input tensor
_a : Optional[int] = jax.random.normal(_a , _a )
_a , _a : str = jax.random.split(_a )
_a : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng}
_a : Tuple = self.module.init(_a , _a )['''params''']
return random_params
def __call__( self , _a , _a = None , ) -> Tuple:
_a : List[str] = jnp.transpose(_a , (0, 2, 3, 1) )
return self.module.apply(
{'''params''': params or self.params} , jnp.array(_a , dtype=jnp.floataa ) , rngs={} , )
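# Standalone sketch of the row-normalized cosine-similarity helper above;
# identical embeddings should score ~1 on the diagonal.
import jax.numpy as _jnp
def _cos_sim(a, b, eps=1e-12):
    a = (a.T / _jnp.clip(_jnp.linalg.norm(a, axis=1), eps)).T
    b = (b.T / _jnp.clip(_jnp.linalg.norm(b, axis=1), eps)).T
    return _jnp.matmul(a, b.T)
print(_cos_sim(_jnp.eye(2), _jnp.eye(2)))  # ~identity: each row matches only itself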
| 15 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
a__ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
a__ = logging.getLogger()
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_a : Any = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_a : Dict = parser.parse_args()
return args.f
def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any:
"""simple docstring"""
_a : Any = os.path.join(__a ,F"""{split}_results.json""" )
if os.path.exists(__a ):
with open(__a ,'''r''' ) as f:
return json.load(__a )
raise ValueError(F"""can't find {path}""" )
a__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
_a : Any = self.get_auto_remove_tmp_dir()
_a : Optional[Any] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_glue.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __lowercase ( self ) -> Dict:
_a : Tuple = self.get_auto_remove_tmp_dir()
_a : Tuple = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_clm_flax.main()
_a : List[str] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
def __lowercase ( self ) -> Optional[int]:
_a : str = self.get_auto_remove_tmp_dir()
_a : Optional[int] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(_a , '''argv''' , _a ):
run_summarization_flax.main()
_a : Optional[int] = get_results(_a , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __lowercase ( self ) -> Tuple:
_a : List[str] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = self.get_auto_remove_tmp_dir()
_a : int = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_ta_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __lowercase ( self ) -> Optional[Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_a : Any = 7 if get_gpu_count() > 1 else 2
_a : List[Any] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_ner.main()
_a : Dict = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __lowercase ( self ) -> Any:
_a : Optional[int] = self.get_auto_remove_tmp_dir()
_a : Union[str, Any] = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_qa.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
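# Standalone sketch of the sys.argv patching trick these tests rely on: the
# example script's main() reads the patched argv as if launched from the CLI.
import sys as _sys
from unittest.mock import patch as _patch
_testargs = ["run_demo.py", "--seed=42"]
with _patch.object(_sys, "argv", _testargs):
    assert _sys.argv == ["run_demo.py", "--seed=42"]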
| 15 | 1 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
a__ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
a__ = logging.getLogger()
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_a : Any = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_a : Dict = parser.parse_args()
return args.f
def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any:
"""simple docstring"""
_a : Any = os.path.join(__a ,F"""{split}_results.json""" )
if os.path.exists(__a ):
with open(__a ,'''r''' ) as f:
return json.load(__a )
raise ValueError(F"""can't find {path}""" )
a__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
_a : Any = self.get_auto_remove_tmp_dir()
_a : Optional[Any] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_glue.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __lowercase ( self ) -> Dict:
_a : Tuple = self.get_auto_remove_tmp_dir()
_a : Tuple = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_clm_flax.main()
_a : List[str] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
def __lowercase ( self ) -> Optional[int]:
_a : str = self.get_auto_remove_tmp_dir()
_a : Optional[int] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(_a , '''argv''' , _a ):
run_summarization_flax.main()
_a : Optional[int] = get_results(_a , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __lowercase ( self ) -> Tuple:
_a : List[str] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = self.get_auto_remove_tmp_dir()
_a : int = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_ta_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __lowercase ( self ) -> Optional[Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_a : Any = 7 if get_gpu_count() > 1 else 2
_a : List[Any] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_ner.main()
_a : Dict = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __lowercase ( self ) -> Any:
_a : Optional[int] = self.get_auto_remove_tmp_dir()
_a : Union[str, Any] = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_qa.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
| 15 |
import argparse
import os
import re
import packaging.version
a__ = '''examples/'''
a__ = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a__ = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
a__ = '''README.md'''
def __UpperCAmelCase ( __a : List[str] ,__a : int ,__a : Optional[Any] ) -> int:
"""simple docstring"""
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Tuple = f.read()
_a , _a : str = REPLACE_PATTERNS[pattern]
_a : List[str] = replace.replace('''VERSION''' ,__a )
_a : List[Any] = re_pattern.sub(__a ,__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.write(__a )
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(__a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__a ,__a ) ,__a ,pattern='''examples''' )
def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__a ,__a ,__a )
if not patch:
update_version_in_examples(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_a : Optional[Any] = '''🤗 Transformers currently provides the following architectures'''
_a : str = '''1. Want to contribute a new model?'''
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Optional[int] = f.readlines()
# Find the start of the list.
_a : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_a : List[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
_a : Tuple = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,)
index += 1
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.writelines(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with open(REPLACE_FILES['''init'''] ,'''r''' ) as f:
_a : Optional[Any] = f.read()
_a : Optional[Any] = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def __UpperCAmelCase ( __a : Dict=False ) -> str:
"""simple docstring"""
_a : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
_a : List[Any] = default_version.base_version
elif patch:
_a : str = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_a : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_a : Dict = input(F"""Which version are you releasing? [{default_version}]""" )
if len(__a ) == 0:
_a : int = default_version
print(F"""Updating version to {version}.""" )
global_version_update(__a ,patch=__a )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
_a : str = get_version()
_a : int = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_a : List[Any] = current_version.base_version
# Check with the user we got that right.
_a : Union[str, Any] = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(__a ) == 0:
_a : List[str] = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(__a )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
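# Standalone sketch of one VERSION substitution performed by the patterns above;
# the version strings are illustrative.
import re as _re
_pat = _re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', _re.MULTILINE)
_src = '__version__ = "4.0.0.dev0"\n'
assert _pat.sub('__version__ = "4.1.0"\n', _src) == '__version__ = "4.1.0"\n'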
| 15 | 1 |
def __UpperCAmelCase ( __a : list ) -> list:
"""simple docstring"""
if len(__a ) <= 1:
return [tuple(__a )]
_a : Tuple = []
def generate(__a : int ,__a : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 ,__a )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
_a , _a : Dict = arr[k - 1], arr[i]
else: # k is odd
_a , _a : int = arr[k - 1], arr[0]
generate(k - 1 ,__a )
generate(len(__a ) ,__a )
return res
if __name__ == "__main__":
a__ = input('''Enter numbers separated by a comma:\n''').strip()
a__ = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
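# Sanity-check sketch (assumes the generator above under its original name,
# `heaps`): Heap's algorithm should yield exactly the n! orderings.
# from itertools import permutations
# assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))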
| 15 |
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
if n == 1 or not isinstance(__a ,__a ):
return 0
elif n == 2:
return 1
else:
_a : Any = [0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
_a : Any = 0
_a : Dict = 2
while digits < n:
index += 1
_a : Dict = len(str(fibonacci(__a ) ) )
return index
def __UpperCAmelCase ( __a : int = 1_000 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(__a )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
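# Standalone iterative sketch of the digit-count search above; the function
# name is illustrative. The first Fibonacci term with 3 digits is F(12) = 144.
def _fib_digit_index(n: int) -> int:
    a, b, idx = 1, 1, 2  # F(1), F(2)
    while len(str(b)) < n:
        a, b = b, a + b
        idx += 1
    return idx
assert _fib_digit_index(3) == 12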
| 15 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
a__ = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
a__ = {
'''camembert-base''': 512,
}
a__ = '''▁'''
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : str = VOCAB_FILES_NAMES
UpperCAmelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
def __init__( self , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=["<s>NOTUSED", "</s>NOTUSED"] , _a = None , **_a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_a : str = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
_a : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_a : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
_a : str = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
_a : Tuple = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
_a : Union[str, Any] = len(self.fairseq_tokens_to_ids )
_a : str = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_a : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __lowercase ( self , _a , _a = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Dict = [self.cls_token_id]
_a : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self , _a , _a = None , _a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __lowercase ( self , _a , _a = None ) -> List[int]:
_a : Any = [self.sep_token_id]
_a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowercase ( self ) -> Dict:
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __lowercase ( self ) -> List[Any]:
_a : List[Any] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self , _a ) -> List[str]:
return self.sp_model.encode(_a , out_type=_a )
def __lowercase ( self , _a ) -> List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_a ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_a )
def __lowercase ( self , _a ) -> Optional[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowercase ( self , _a ) -> Tuple:
_a : Dict = []
_a : int = ''''''
_a : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
_a : Optional[int] = True
_a : Any = []
else:
current_sub_tokens.append(_a )
_a : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __getstate__( self ) -> Union[str, Any]:
_a : Optional[Any] = self.__dict__.copy()
_a : int = None
return state
def __setstate__( self , _a ) -> Optional[int]:
_a : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a : List[str] = {}
_a : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : int = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
_a : Tuple = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
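# Usage sketch for the tokenizer above; "camembert-base" comes from the vocab
# map and requires the sentencepiece package (weights download on first use).
if __name__ == "__main__":
    from transformers import CamembertTokenizer
    _tok = CamembertTokenizer.from_pretrained("camembert-base")
    print(_tok.tokenize("J'aime le camembert !"))  # pieces prefixed with "▁"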
| 15 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a__ = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
a__ = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
a__ = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the reference labels."""
    return float((preds == labels).mean())
def acc_and_f1(preds, labels, f1_avg="binary"):
    """Accuracy plus F1 score (the averaging mode depends on the SuperGLUE subset)."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    """MultiRC metrics: exact match, per-question macro-F1 (f1_m) and answer-level F1 (f1_a)."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 15 | 1 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a__ = logging.get_logger(__name__)
class UpperCAmelCase_(BaseImageProcessor):
"""simple docstring"""
UpperCAmelCase__ : Tuple = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"""
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
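# Usage sketch (the input image is illustrative, not taken from this file); with
# the defaults, the shorter edge is resized to 256/224 * 224 and the result is
# center-cropped to 224x224:
#
#   from PIL import Image
#   processor = UpperCAmelCase_()
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape   # -> (1, 3, 224, 224)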
| 15 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Power iteration: return the largest-magnitude eigenvalue and its eigenvector."""
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply the matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration() -> None:
    """Check the implementation against numpy's eigh on a real and a complex matrix."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
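# Quick illustrative run (not from the original file): for the symmetric matrix
# [[2, 1], [1, 2]] with eigenvalues 1 and 3, power iteration converges to the
# dominant eigenvalue:
#
#   >>> value, _ = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
#   >>> round(float(value), 6)
#   3.0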
| 15 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class UpperCAmelCase_(Pipeline):
    """Language generation pipeline using any model with a causal language modeling head."""
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]:
_a : Optional[int] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
_a : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a : int = generate_kwargs['''max_new_tokens''']
else:
_a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
_a : List[Any] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
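# In practice this pipeline is reached through the high-level factory (the model
# name below is illustrative):
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm", max_new_tokens=5, return_full_text=False)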
| 15 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase__ : Optional[datasets.Features] = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self):
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        """We handle string, list and dict data_files."""
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
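# This builder is normally exercised indirectly (the file name is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files={"train": "data.pkl"})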
| 15 | 1 |
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick automaton for multi-keyword string matching."""
    def __init__(self, keywords):
        self.adlist: list[dict] = []
        self.adlist.append({"value": "", "next_states": [], "fail_state": 0, "output": []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()
    def find_next_state(self, current_state, char) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword(self, keyword) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)
    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )
    def search_in(self, string) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of their occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
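# Example (hypothetical keywords/text, not from the original file): building the
# automaton over ["he", "she", "his", "hers"] and searching "ahishers" should
# yield {"his": [1], "she": [3], "he": [4], "hers": [4]} -- each value is the
# list of start offsets at which the keyword occurs.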
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Modular exponentiation by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Last `digits` digits of the hyperexponentiation (tetration) of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
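# Sanity check (illustrative, not from the original file): _modexpt agrees with
# Python's built-in three-argument pow, e.g.
#   _modexpt(3, 10, 1_000) == pow(3, 10, 1_000) == 49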
if __name__ == "__main__":
print(f'''{solution() = }''')
| 15 | 1 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder raising an import error until torch and scipy are installed."""
    _backends = ["torch", "scipy"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 15 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, cuda or gpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 15 | 1 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Return vertices in reverse topological order of finishing times (first DFS pass)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect all vertices reachable from `vert` in the reversed graph (second DFS pass)."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: return the strongly connected components of `graph`."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
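# Example (illustrative): in test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
# the cycle 0 -> 2 -> 1 -> 0 forms one component, so
#   strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]]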
| 15 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
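# With the lazy module installed in sys.modules, the public names resolve on
# first attribute access, e.g.:
#
#   from transformers import XmodConfig, XmodModel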
| 15 | 1 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """True for "curious" fractions such as 49/98 where cancelling the shared digit is valid."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    """All non-trivial digit-cancelling fractions with `digit_len`-digit terms."""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(F"""{num}/{den}""")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    """Denominator of the product of the curious fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
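# For two-digit terms the four non-trivial curious fractions are 16/64, 19/95,
# 26/65 and 49/98; their product reduces to 1/100, so solution() returns 100.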
if __name__ == "__main__":
print(solution())
| 15 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 15 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_(TestCase):
"""simple docstring"""
    def setUp(self):
_a : Tuple = tempfile.mkdtemp()
_a : List[str] = 5
# Realm tok
_a : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a : Optional[int] = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_a : str = os.path.join(_a , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : List[str] = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(_a , exist_ok=_a )
    def get_tokenizer(self) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
    def tearDown(self):
shutil.rmtree(self.tmpdirname )
    def get_config(self):
_a : int = RealmConfig(num_block_records=self.num_block_records )
return config
    def get_dummy_dataset(self):
_a : Optional[Any] = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
    def get_dummy_block_records(self):
_a : Any = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=_a , )
return block_records
    def get_dummy_retriever(self):
_a : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
    def test_retrieve(self):
_a : Dict = self.get_config()
_a : Tuple = self.get_dummy_retriever()
_a : Dict = retriever.tokenizer
_a : Union[str, Any] = np.array([0, 3] , dtype='''long''' )
_a : Any = tokenizer(['''Test question'''] ).input_ids
_a : int = tokenizer(
['''the fourth'''] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids
_a : str = config.reader_seq_len
_a , _a , _a , _a : Any = retriever(
_a , _a , answer_ids=_a , max_length=_a , return_tensors='''np''' )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
    def test_block_has_answer(self):
_a : Optional[int] = self.get_config()
_a : Union[str, Any] = self.get_dummy_retriever()
_a : Tuple = retriever.tokenizer
_a : List[str] = np.array([0, 3, 5] , dtype='''long''' )
_a : int = tokenizer(['''Test question'''] ).input_ids
_a : List[str] = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids
_a : Union[str, Any] = config.reader_seq_len
_a , _a , _a , _a : List[Any] = retriever(
_a , _a , answer_ids=_a , max_length=_a , return_tensors='''np''' )
self.assertEqual([False, True, True] , _a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _a )
    def test_save_load_pretrained(self):
_a : Any = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
_a : str = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
_a : Tuple = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
_a : str = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 15 |
from __future__ import annotations
def average(nums: list) -> float:
    """Return the arithmetic mean of a list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
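# Example (illustrative): average([2, 4, 6]) -> 4.0; average([]) raises ValueError.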
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    """Build a SwinConfig for the SimMIM checkpoints (base/large, 192px)."""
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    """Map an original SimMIM parameter name to its transformers equivalent."""
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_a : Tuple = orig_state_dict.pop(__a )
if "attn_mask" in key:
pass
elif "qkv" in key:
_a : List[str] = key.split('''.''' )
_a : Dict = int(key_split[2] )
_a : Any = int(key_split[4] )
_a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_a : str = val[:dim, :]
_a : List[Any] = val[dim : dim * 2, :]
_a : Tuple = val[-dim:, :]
else:
_a : Optional[Any] = val[:dim]
_a : List[Any] = val[dim : dim * 2]
_a : str = val[-dim:]
else:
_a : Dict = val
return orig_state_dict
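# A self-contained sketch of the qkv split performed above: the original
# checkpoint stores query/key/value as one fused matrix of shape
# (3 * dim, dim), and the loop slices it into equal thirds. The size 4 below
# is purely illustrative, not a real Swin dimension.
demo_dim = 4
demo_qkv = torch.randn(3 * demo_dim, demo_dim)  # fused [q; k; v] weight
demo_q = demo_qkv[:demo_dim, :]
demo_k = demo_qkv[demo_dim : demo_dim * 2, :]
demo_v = demo_qkv[-demo_dim:, :]
assert torch.equal(torch.cat([demo_q, demo_k, demo_v]), demo_qkv)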
def __UpperCAmelCase ( __a : Any ,__a : Optional[Any] ,__a : Union[str, Any] ,__a : Any ) -> List[Any]:
"""simple docstring"""
_a : Any = torch.load(__a ,map_location='''cpu''' )['''model''']
_a : Optional[Any] = get_swin_config(__a )
_a : int = SwinForMaskedImageModeling(__a )
model.eval()
_a : List[str] = convert_state_dict(__a ,__a )
model.load_state_dict(__a )
_a : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : Tuple = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
_a : Any = Image.open(requests.get(__a ,stream=__a ).raw )
_a : str = image_processor(images=__a ,return_tensors='''pt''' )
with torch.no_grad():
_a : List[Any] = model(**__a ).logits
print(outputs.shape )  # logits here are the reconstructed pixel values
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
if push_to_hub:
print(F"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(F"""microsoft/{model_name}""" )
image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a__ = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
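# The conversion entry point can also be driven directly from Python; the
# invocation below is kept commented because the paths are placeholders,
# not real files:
# convert_swin_checkpoint(
#     "swin-base-simmim-window6-192",
#     "/path/to/simmim_pretrain__swin_base__img192_window6__100ep.pth",
#     "./swin-base-simmim",
#     False,
# )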
| 15 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
a__ = ['''small''', '''medium''', '''large''']
a__ = '''lm_head.decoder.weight'''
a__ = '''lm_head.weight'''
def __UpperCAmelCase ( __a : str ,__a : str ) -> List[str]:
"""simple docstring"""
_a : Any = torch.load(__a )
_a : List[str] = d.pop(__a )
os.makedirs(__a ,exist_ok=__a )
torch.save(__a ,os.path.join(__a ,__a ) )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
a__ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
a__ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
a__ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
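# A minimal sketch of the key rename the converter above performs: the
# fairseq-style '''lm_head.decoder.weight''' entry is popped and (in the
# original script) stored back under '''lm_head.weight''' before saving.
demo_state = {'''lm_head.decoder.weight''': torch.zeros(2, 2)}
demo_state['''lm_head.weight'''] = demo_state.pop('''lm_head.decoder.weight''')
assert list(demo_state) == ['''lm_head.weight''']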
| 15 | 1 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a__ = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCAmelCase__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
UpperCAmelCase__ : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
UpperCAmelCase__ : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase__ : bool = field(
default=__lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = self.task_name.lower()
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : str = "train"
UpperCAmelCase__ : Optional[Any] = "dev"
UpperCAmelCase__ : int = "test"
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : GlueDataTrainingArguments
UpperCAmelCase__ : str
UpperCAmelCase__ : List[InputFeatures]
def __init__( self , _a , _a , _a = None , _a = Split.train , _a = None , ) -> int:
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , _a , )
_a : List[Any] = args
_a : List[Any] = glue_processors[args.task_name]()
_a : Optional[Any] = glue_output_modes[args.task_name]
if isinstance(_a , _a ):
try:
_a : Optional[Any] = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
_a : int = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
_a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_a , _a : Union[str, Any] = label_list[2], label_list[1]
_a : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a : Optional[Any] = cached_features_file + '''.lock'''
with FileLock(_a ):
if os.path.exists(_a ) and not args.overwrite_cache:
_a : str = time.time()
_a : Union[str, Any] = torch.load(_a )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
_a : Union[str, Any] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_a : Optional[int] = self.processor.get_test_examples(args.data_dir )
else:
_a : Tuple = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_a : List[Any] = examples[:limit_length]
_a : Optional[Any] = glue_convert_examples_to_features(
_a , _a , max_length=args.max_seq_length , label_list=_a , output_mode=self.output_mode , )
_a : List[str] = time.time()
torch.save(self.features , _a )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ) -> int:
return len(self.features )
def __getitem__( self , _a ) -> InputFeatures:
return self.features[i]
def __lowercase ( self ) -> Optional[Any]:
return self.label_list
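# A self-contained sketch of the lock-guarded caching pattern used above:
# the first process to acquire the lock builds and saves the features, and
# every other process blocks on the lock and then loads the cached file.
# `build_features` is a placeholder callable, not part of the class above.
def load_or_build_cache(cache_path, build_features, overwrite=False):
    with FileLock(cache_path + '''.lock'''):
        if os.path.exists(cache_path) and not overwrite:
            return torch.load(cache_path)
        features = build_features()
        torch.save(features, cache_path)
        return features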
| 15 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ ( enum.Enum ):
"""simple docstring"""
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Optional[Any] = 2
@add_end_docstrings(__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self , *_a , **_a ) -> List[str]:
super().__init__(*_a , **_a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_a : Dict = None
if self.model.config.prefix is not None:
_a : List[Any] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_a : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params )
_a : Optional[Any] = {**self._preprocess_params, **preprocess_params}
_a : List[Any] = {**self._forward_params, **forward_params}
def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]:
_a : List[Any] = {}
if prefix is not None:
_a : Optional[Any] = prefix
if prefix:
_a : Dict = self.tokenizer(
_a , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Tuple = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_a : Dict = handle_long_generation
preprocess_params.update(_a )
_a : Tuple = generate_kwargs
_a : Any = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_a : Any = ReturnType.TENSORS
if return_type is not None:
_a : Any = return_type
if clean_up_tokenization_spaces is not None:
_a : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a )
if len(_a ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_a : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowercase ( self , *_a , **_a ) -> Union[str, Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self , _a , **_a ) -> List[str]:
return super().__call__(_a , **_a )
def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]:
_a : Optional[int] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
_a : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a : int = generate_kwargs['''max_new_tokens''']
else:
_a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
_a : List[Any] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
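# Worked example of the "hole" arithmetic above, with assumed numbers: if the
# tokenizer's model_max_length is 1024, the prompt is 900 tokens long and 200
# new tokens are requested, then keep_length = 1024 - 200 = 824, so only the
# trailing 824 prompt tokens (and their attention mask) are kept.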
def __lowercase ( self , _a , **_a ) -> Optional[int]:
_a : Any = model_inputs['''input_ids''']
_a : Optional[Any] = model_inputs.get('''attention_mask''' , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
_a : int = None
_a : int = None
_a : List[str] = 1
else:
_a : List[Any] = input_ids.shape[0]
_a : Union[str, Any] = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_a : int = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_a : Tuple = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_a : Dict = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a )
_a : int = generated_sequence.shape[0]
if self.framework == "pt":
_a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int:
_a : Tuple = model_outputs['''generated_sequence'''][0]
_a : int = model_outputs['''input_ids''']
_a : Any = model_outputs['''prompt_text''']
_a : Any = generated_sequence.numpy().tolist()
_a : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_a : Optional[int] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_a : str = self.tokenizer.decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_a : Union[str, Any] = 0
else:
_a : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) )
if return_type == ReturnType.FULL_TEXT:
_a : str = prompt_text + text[prompt_length:]
else:
_a : List[str] = text[prompt_length:]
_a : Union[str, Any] = {'''generated_text''': all_text}
records.append(_a )
return records
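# Typical end-to-end use of this pipeline via the `pipeline` factory; "gpt2"
# is just one example of a causal LM checkpoint. Kept commented here because
# importing the top-level package from inside it would be circular:
# from transformers import pipeline
# generator = pipeline("text-generation", model="gpt2")
# print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])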
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
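# With the lazy structure above, `import transformers` stays cheap: the heavy
# `modeling_wavlm` submodule is only executed once a user actually touches
# one of its exported names, e.g.
# from transformers import WavLMModel  # triggers the lazy import of modeling_wavlm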
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __a : Dict=None ) -> str:
"""simple docstring"""
if subparsers is not None:
_a : Union[str, Any] = subparsers.add_parser('''test''' )
else:
_a : List[str] = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' ,default=__a ,help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) ,)
if subparsers is not None:
parser.set_defaults(func=__a )
return parser
def __UpperCAmelCase ( __a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Dict = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
_a : List[Any] = script_name
else:
_a : Union[str, Any] = F"""--config_file={args.config_file} {script_name}"""
_a : str = ['''accelerate-launch'''] + test_args.split()
_a : str = execute_subprocess_async(__a ,env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = test_command_parser()
_a : List[Any] = parser.parse_args()
test_command(__a )
if __name__ == "__main__":
main()
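# From a shell this subcommand is exposed as `accelerate test`; a hedged
# programmatic equivalent (assumes the accelerate CLI is installed on PATH):
# import subprocess
# subprocess.run(["accelerate", "test"], check=True)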
| 15 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Union[str, Any]=None ,__a : Dict=None ) -> List[Any]:
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=__a )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCAmelCase__ : List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
UpperCAmelCase__ : List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
UpperCAmelCase__ : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
UpperCAmelCase__ : bool = field(
default=__lowercase , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
UpperCAmelCase__ : bool = field(
default=__lowercase , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
UpperCAmelCase__ : bool = field(
default=__lowercase , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Use FP16 to accelerate inference."} )
UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Benchmark training of model"} )
UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Verbose memory tracing"} )
UpperCAmelCase__ : bool = field(
default=__lowercase , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
UpperCAmelCase__ : bool = field(
default=__lowercase , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Trace memory line by line"} )
UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Save result to a CSV file"} )
UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Save all print statements in a log file"} )
UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Whether to print environment information"} )
UpperCAmelCase__ : bool = field(
default=__lowercase , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
UpperCAmelCase__ : str = field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
UpperCAmelCase__ : str = field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
UpperCAmelCase__ : str = field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
UpperCAmelCase__ : str = field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
UpperCAmelCase__ : str = field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
UpperCAmelCase__ : str = field(
default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
UpperCAmelCase__ : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
UpperCAmelCase__ : bool = field(
default=__lowercase , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def __lowercase ( self ) -> Union[str, Any]:
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , _a , )
def __lowercase ( self ) -> List[Any]:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __lowercase ( self ) -> List[str]:
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def __lowercase ( self ) -> List[str]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
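# A self-contained sketch of what the `list_field` helper above expands to:
# wrapping the default in a factory lets dataclasses accept a mutable default
# (a bare `= [8]` would raise ValueError). Note that the factory returns the
# same list object for every instance, so the default is shared, not copied.
def demo_list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class DemoBenchmarkArgs:
    batch_sizes: List[int] = demo_list_field(default=[8, 32])
assert DemoBenchmarkArgs().batch_sizes == [8, 32]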
| 15 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : Any = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_a : str = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __lowercase ( self , **_a ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> str:
_a : List[str] = self.get_tokenizer()
_a : Tuple = self.get_image_processor()
_a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Dict:
_a : List[str] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Dict = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = self.prepare_image_inputs()
_a : List[Any] = image_processor(_a , return_tensors='''np''' )
_a : Dict = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Tuple = '''lower newer'''
_a : int = processor(text=_a )
_a : str = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> List[Any]:
_a : Any = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Union[str, Any] = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : int = processor.batch_decode(_a )
_a : int = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Tuple = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Optional[int] = '''lower newer'''
_a : Dict = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 15 | 1 |
def __UpperCAmelCase ( __a : int ,__a : int ,__a : int ) -> int:
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
_a : List[Any] = _modexpt(__a ,exponent // 2 ,__a ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__a ,exponent - 1 ,__a )) % modulo_value
def __UpperCAmelCase ( __a : int = 1_777 ,__a : int = 1_855 ,__a : int = 8 ) -> int:
"""simple docstring"""
_a : List[Any] = base
for _ in range(1 ,__a ):
_a : Any = _modexpt(__a ,__a ,10**digits )
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
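# Sanity check: for exponent >= 1 the recursive square-and-multiply helper
# above (`_modexpt` in its original form) agrees with Python's built-in
# three-argument pow, e.g. 3**5 = 243 and 243 % 100 = 43:
assert pow(3, 5, 100) == 43
# solution() then iterates base ** base ** ... ** base (a power tower)
# modulo 10**digits, keeping only the last `digits` digits at every step.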
| 15 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
_a : Optional[Any] = getattr(__a ,__a )
if weight_type is not None:
_a : Dict = getattr(__a ,__a ).shape
else:
_a : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : List[Any] = value
elif weight_type == "weight_g":
_a : Any = value
elif weight_type == "weight_v":
_a : Union[str, Any] = value
elif weight_type == "bias":
_a : Optional[int] = value
else:
_a : List[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = []
_a : Union[str, Any] = fairseq_model.state_dict()
_a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a : int = False
if "conv_layers" in name:
load_conv_layer(
__a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,)
_a : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
_a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
_a : Any = True
if "*" in mapped_key:
_a : Optional[int] = name.split(__a )[0].split('''.''' )[-2]
_a : Any = mapped_key.replace('''*''' ,__a )
if "weight_g" in name:
_a : List[Any] = '''weight_g'''
elif "weight_v" in name:
_a : List[str] = '''weight_v'''
elif "weight" in name:
_a : Any = '''weight'''
elif "bias" in name:
_a : str = '''bias'''
else:
_a : Any = None
set_recursively(__a ,__a ,__a ,__a ,__a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple:
"""simple docstring"""
_a : int = full_name.split('''conv_layers.''' )[-1]
_a : Any = name.split('''.''' )
_a : List[Any] = int(items[0] )
_a : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_a : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
_a : Tuple = HubertConfig.from_pretrained(__a )
else:
_a : Any = HubertConfig()
if is_finetuned:
if dict_path:
_a : Tuple = Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a : Any = target_dict.pad_index
_a : Tuple = target_dict.bos_index
_a : Optional[int] = target_dict.eos_index
_a : Optional[Any] = len(target_dict.symbols )
_a : Tuple = os.path.join(__a ,'''vocab.json''' )
if not os.path.isdir(__a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) )
return
os.makedirs(__a ,exist_ok=__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices ,__a )
_a : Tuple = WavaVecaCTCTokenizer(
__a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,)
_a : Tuple = True if config.feat_extract_norm == '''layer''' else False
_a : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,)
_a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a )
processor.save_pretrained(__a )
_a : Tuple = HubertForCTC(__a )
else:
_a : Tuple = HubertModel(__a )
if is_finetuned:
_a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a : Any = model[0].eval()
recursively_load_weights(__a ,__a ,__a )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
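# The converter above can also be called directly; all paths below are
# placeholders, and the argument meanings follow the argparse wiring:
# convert_hubert_checkpoint(
#     "/path/to/hubert_base_ls960.pt",   # fairseq checkpoint
#     "./hubert-base",                   # output folder
#     None,                              # optional HF config.json
#     None,                              # optional fairseq dict (fine-tuned only)
#     False,                             # not fine-tuned -> bare HubertModel
# )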
| 15 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=2 , _a=3 , _a=4 , _a=2 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=9_9 , _a=3_6 , _a=2 , _a=4 , _a=3_7 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=1_6 , _a=2 , _a=0.02 , _a=6 , _a=6 , _a=3 , _a=4 , _a=None , _a=1_0_0_0 , ) -> List[Any]:
_a : Optional[int] = parent
_a : Dict = batch_size
_a : Optional[int] = num_channels
_a : Tuple = image_size
_a : str = patch_size
_a : int = is_training
_a : List[str] = use_input_mask
_a : List[str] = use_token_type_ids
_a : Tuple = use_labels
_a : Optional[Any] = vocab_size
_a : int = hidden_size
_a : List[Any] = num_hidden_layers
_a : Union[str, Any] = num_attention_heads
_a : str = intermediate_size
_a : Any = hidden_act
_a : Any = hidden_dropout_prob
_a : str = attention_probs_dropout_prob
_a : Tuple = max_position_embeddings
_a : Tuple = type_vocab_size
_a : List[str] = type_sequence_label_size
_a : Dict = initializer_range
_a : List[str] = coordinate_size
_a : List[str] = shape_size
_a : Optional[int] = num_labels
_a : Optional[int] = num_choices
_a : Optional[int] = scope
_a : Tuple = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_a : str = text_seq_length
_a : List[str] = (image_size // patch_size) ** 2 + 1
_a : List[str] = self.text_seq_length + self.image_seq_length
def __lowercase ( self ) -> Any:
_a : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_a : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
_a : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_a : Optional[int] = bbox[i, j, 3]
_a : str = bbox[i, j, 1]
_a : Optional[int] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_a : int = bbox[i, j, 2]
_a : Optional[Any] = bbox[i, j, 0]
_a : List[str] = tmp_coordinate
_a : Optional[int] = tf.constant(_a )
_a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = None
if self.use_input_mask:
_a : Optional[int] = random_attention_mask([self.batch_size, self.text_seq_length] )
_a : List[Any] = None
if self.use_token_type_ids:
_a : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_a : int = None
_a : Optional[Any] = None
if self.use_labels:
_a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_a : Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> List[Any]:
_a : Union[str, Any] = TFLayoutLMvaModel(config=_a )
# text + image
_a : str = model(_a , pixel_values=_a , training=_a )
_a : List[Any] = model(
_a , bbox=_a , pixel_values=_a , attention_mask=_a , token_type_ids=_a , training=_a , )
_a : Tuple = model(_a , bbox=_a , pixel_values=_a , training=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_a : int = model(_a , training=_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_a : List[str] = model({'''pixel_values''': pixel_values} , training=_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[Any]:
_a : Optional[Any] = self.num_labels
_a : str = TFLayoutLMvaForSequenceClassification(config=_a )
_a : str = model(
_a , bbox=_a , pixel_values=_a , attention_mask=_a , token_type_ids=_a , labels=_a , training=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]:
_a : str = self.num_labels
_a : Optional[Any] = TFLayoutLMvaForTokenClassification(config=_a )
_a : str = model(
_a , bbox=_a , pixel_values=_a , attention_mask=_a , token_type_ids=_a , labels=_a , training=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> str:
_a : Tuple = 2
_a : str = TFLayoutLMvaForQuestionAnswering(config=_a )
_a : Dict = model(
_a , bbox=_a , pixel_values=_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , training=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase ( self ) -> List[str]:
_a : Any = self.prepare_config_and_inputs()
((_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a)) : Optional[int] = config_and_inputs
_a : Tuple = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[Any] = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : str = False
def __lowercase ( self , _a , _a , _a , _a , _a ) -> Optional[int]:
return True
def __lowercase ( self , _a , _a , _a=False ) -> dict:
_a : List[str] = copy.deepcopy(_a )
if model_class in get_values(_a ):
_a : List[Any] = {
k: tf.tile(tf.expand_dims(_a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_a , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_a ):
_a : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_a ):
_a : List[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
_a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_a ):
_a : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_a ):
_a : Any = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __lowercase ( self ) -> Tuple:
_a : int = TFLayoutLMvaModelTester(self )
_a : Optional[int] = ConfigTester(self , config_class=_a , hidden_size=3_7 )
def __lowercase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def __lowercase ( self ) -> str:
_a , _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
if getattr(_a , '''hf_compute_loss''' , _a ):
# The number of elements in the loss should be the same as the number of elements in the label
_a : str = self._prepare_for_class(inputs_dict.copy() , _a , return_labels=_a )
_a : List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_a )[0]
]
_a : Dict = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_a : Tuple = self._prepare_for_class(inputs_dict.copy() , _a , return_labels=_a )
_a : Optional[Any] = prepared_for_class.pop('''input_ids''' )
_a : List[str] = model(_a , **_a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
_a : int = self._prepare_for_class(inputs_dict.copy() , _a , return_labels=_a )
_a : str = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
_a : Tuple = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
_a : List[Any] = -1_0_0
_a : Tuple = tf.convert_to_tensor(_a )
_a : Optional[Any] = model(_a , **_a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
_a : List[Any] = self._prepare_for_class(inputs_dict.copy() , _a , return_labels=_a )
_a : Dict = model(_a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
_a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _a , return_labels=_a )
# Get keys that were added with the _prepare_for_class function
_a : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
_a : Optional[int] = inspect.signature(model.call ).parameters
_a : List[str] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
_a : Optional[int] = {0: '''input_ids'''}
for label_key in label_keys:
_a : int = signature_names.index(_a )
_a : int = label_key
_a : List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
_a : List[str] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
_a : Optional[Any] = prepared_for_class[value]
_a : List[Any] = tuple(_a )
# Send to model
_a : Tuple = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __lowercase ( self ) -> List[Any]:
((_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a)) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_a , _a , _a , _a , _a , _a )
def __lowercase ( self ) -> Tuple:
((_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a)) : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a : int = type
self.model_tester.create_and_check_model(_a , _a , _a , _a , _a , _a )
def __lowercase ( self ) -> str:
((_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a)) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_a , _a , _a , _a , _a , _a , _a )
def __lowercase ( self ) -> int:
((_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a)) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_a , _a , _a , _a , _a , _a , _a )
def __lowercase ( self ) -> Any:
((_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a)) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_a , _a , _a , _a , _a , _a , _a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Optional[Any] = TFLayoutLMvaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_a : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> Tuple:
return LayoutLMvaImageProcessor(apply_ocr=_a ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> List[Any]:
_a : Dict = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
_a : Tuple = self.default_image_processor
_a : List[str] = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''tf''' ).pixel_values
_a : Optional[Any] = tf.constant([[1, 2]] )
_a : str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
_a : Dict = model(input_ids=_a , bbox=_a , pixel_values=_a , training=_a )
# verify the logits
_a : Union[str, Any] = (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape , _a )
_a : int = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _a , atol=1e-4 ) )
| 15 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"]
UpperCAmelCase__ : str = "ViltImageProcessor"
UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a=None , _a=None , **_a ) -> Any:
_a : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
_a : Dict = kwargs.pop('''feature_extractor''' )
_a : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
_a : int = self.image_processor
def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
_a : Tuple = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel_values + pixel_mask
_a : str = self.image_processor(_a , return_tensors=_a )
encoding.update(_a )
return encoding
def __lowercase ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> str:
return self.tokenizer.decode(*_a , **_a )
@property
def __lowercase ( self ) -> Optional[int]:
_a : str = self.tokenizer.model_input_names
_a : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowercase ( self ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __lowercase ( self ) -> Any:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
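# Typical use of the processor above; the checkpoint name is one public
# example, and any ViLT checkpoint with a matching tokenizer works. Kept
# commented since importing the top-level package from inside it is circular:
# from PIL import Image
# from transformers import ViltProcessor
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# encoding = processor(Image.new("RGB", (384, 384)), "How many cats?", return_tensors="pt")
# list(encoding.keys())  # input_ids, token_type_ids, attention_mask, pixel_values, pixel_mask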
| 15 | 1 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()
device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings'''
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
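# A rough timing harness (an addition, not part of the original script; the
# function name is illustrative) to quantify what the ipex/bf16 path above
# buys you. Assumes the pipeline, prompt and generate_kwargs defined above.
import time
def time_one_image(n_warmup: int = 1, n_runs: int = 3) -> float:
    with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
        for _ in range(n_warmup):
            pipe(prompt, **generate_kwargs)
        start = time.time()
        for _ in range(n_runs):
            pipe(prompt, **generate_kwargs)
    return (time.time() - start) / n_runs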
| 15 |
from math import ceil
def solution(n: int = 1_001) -> int:
    """Sum of the numbers on both diagonals of an n x n number spiral (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('''Invalid entry - please enter a number''')
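# Cross-check (an addition, not part of the original solution): for odd n the
# diagonal sum has the closed form (4*n**3 + 3*n**2 + 8*n - 9) / 6, so the
# loop above can be verified in O(1).
def solution_closed_form(n: int = 1_001) -> int:
    return (4 * n**3 + 3 * n**2 + 8 * n - 9) // 6
assert solution_closed_form(5) == 101  # 1+3+5+7+9+13+17+21+25
assert solution_closed_form(1_001) == solution(1_001)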
| 15 | 1 |
import re
import string
import numpy as np
import datasets
a__ = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
a__ = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
a__ = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def __lowercase ( self , _a , _a , _a=None , _a=False , _a=False , _a=False , ) -> Optional[int]:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
_a : Tuple = np.array([re.sub(_a , '''''' , _a ) for x in predictions] )
_a : Dict = np.array([re.sub(_a , '''''' , _a ) for x in references] )
else:
_a : Optional[int] = np.asarray(_a )
_a : Optional[int] = np.asarray(_a )
if ignore_case:
_a : str = np.char.lower(_a )
_a : Optional[Any] = np.char.lower(_a )
if ignore_punctuation:
_a : str = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
_a : Optional[Any] = np.char.translate(_a , table=_a )
_a : Optional[Any] = np.char.translate(_a , table=_a )
if ignore_numbers:
_a : Optional[int] = string.digits.maketrans('''''' , '''''' , string.digits )
_a : Any = np.char.translate(_a , table=_a )
_a : Union[str, Any] = np.char.translate(_a , table=_a )
_a : Optional[int] = predictions == references
return {"exact_match": np.mean(_a ) * 1_0_0}
| 15 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale a (left, top, right, bottom) pixel box to the 0-1000 range used by LayoutLM."""
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image and returns recognized words plus normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
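# Quick illustrative check (an addition, not part of the processor module) of
# the 0-1000 normalization performed by normalize_box above: relative
# coordinates are preserved regardless of the page's pixel size.
if __name__ == "__main__":
    demo_box = [100, 200, 300, 400]  # (left, top, right, bottom) in pixels
    assert normalize_box(demo_box, 1_000, 2_000) == [100, 100, 300, 200]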
| 15 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
a__ = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
a__ = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
a__ = '''▁'''
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[str] = ["input_ids", "attention_mask"]
def __init__( self , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a = None , **_a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_a : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
_a : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_a : Tuple = vocab_file
_a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
_a : int = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
_a : Any = len(self.sp_model ) - 1
_a : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __lowercase ( self , _a , _a = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Union[str, Any] = [self.cls_token_id]
_a : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self , _a , _a = None , _a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __lowercase ( self , _a , _a = None ) -> List[int]:
_a : Dict = [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowercase ( self ) -> List[str]:
return len(self.sp_model )
def __lowercase ( self ) -> int:
_a : Any = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self , _a ) -> List[str]:
return self.sp_model.encode(_a , out_type=_a )
def __lowercase ( self , _a ) -> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_a : Union[str, Any] = self.sp_model.PieceToId(_a )
return spm_id if spm_id else self.unk_token_id
def __lowercase ( self , _a ) -> Dict:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_a )
def __lowercase ( self , _a ) -> Union[str, Any]:
_a : Union[str, Any] = []
_a : int = ''''''
_a : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
_a : int = True
_a : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
_a : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __getstate__( self ) -> Optional[int]:
_a : Optional[int] = self.__dict__.copy()
_a : List[Any] = None
return state
def __setstate__( self , _a ) -> Union[str, Any]:
_a : Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a : Any = {}
_a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : List[str] = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
_a : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
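# Dependency-free sketch (an addition; helper names are ours) of the
# sequence-pair layout the tokenizer above produces: <s> A </s></s> B </s>,
# with a matching special-tokens mask.
def build_pair_ids(ids_a, ids_b, cls_id=0, sep_id=2):
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]
def pair_special_tokens_mask(ids_a, ids_b):
    return [1] + [0] * len(ids_a) + [1, 1] + [0] * len(ids_b) + [1]
assert len(build_pair_ids([5, 6], [7])) == len(pair_special_tokens_mask([5, 6], [7])) == 7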
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """simple docstring"""
    parser = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=False )
    subparsers = parser.add_subparsers(help='''accelerate command helpers''' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args ,'''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
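# For reference (illustrative, not part of the file): the subparsers wired up
# above correspond to invocations such as
#   accelerate config
#   accelerate env
#   accelerate test
#   accelerate launch my_script.py   # my_script.py is a placeholder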
| 15 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = LongformerTokenizer
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[str] = LongformerTokenizerFast
UpperCAmelCase__ : Any = True
def __lowercase ( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_a : Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a : List[Any] = {'''unk_token''': '''<unk>'''}
_a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __lowercase ( self , **_a ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Union[str, Any]:
_a : Optional[Any] = '''lower newer'''
_a : Optional[int] = '''lower newer'''
return input_text, output_text
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a : Any = '''lower newer'''
_a : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_a : Optional[Any] = tokenizer.tokenize(_a ) # , add_prefix_space=True)
self.assertListEqual(_a , _a )
_a : Dict = tokens + [tokenizer.unk_token]
_a : Union[str, Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def __lowercase ( self ) -> int:
_a : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=_a ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=_a ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __lowercase ( self ) -> List[str]:
_a : Optional[Any] = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
_a : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
_a : Optional[int] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
_a : List[str] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=_a , add_prefix_space=_a )
_a : Optional[int] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_a , add_prefix_space=_a )
_a : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_a )
_a : Optional[int] = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __lowercase ( self ) -> Optional[int]:
_a : Dict = self.get_tokenizer()
_a : Any = '''Encode this sequence.'''
_a : Tuple = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
_a : Any = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a )
_a : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_a , _a )
_a : str = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a )
_a : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_a , _a )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
_a : List[str] = tokenizer.encode(_a , add_special_tokens=_a )
_a : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_a , _a )
# Testing spaces after special tokens
_a : str = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(_a , lstrip=_a , rstrip=_a )} ) # mask token has a left space
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Optional[Any] = '''Encode <mask> sequence'''
_a : Tuple = '''Encode <mask>sequence'''
_a : Optional[Any] = tokenizer.encode(_a )
_a : Optional[Any] = encoded.index(_a )
_a : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_a , _a )
_a : Optional[Any] = tokenizer.encode(_a )
_a : int = encoded.index(_a )
_a : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_a , _a )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : Tuple = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_a : Union[str, Any] = self.tokenizer_class.from_pretrained(_a , **_a )
_a : List[Any] = '''A, <mask> AllenNLP sentence.'''
_a : str = tokenizer_r.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a )
_a : List[str] = tokenizer_p.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
_a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def __lowercase ( self ) -> Optional[int]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_a : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
_a : int = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_a : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , _a )
self.assertEqual(post_processor_state['''add_prefix_space'''] , _a )
self.assertEqual(post_processor_state['''trim_offsets'''] , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : List[str] = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_a : List[str] = F"""{text_of_1_token} {text_of_1_token}"""
_a : List[str] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
_a : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_a : Tuple = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
_a : int = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_a : Optional[int] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
_a : str = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , )
_a : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
_a : Union[str, Any] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , )
_a : List[str] = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_a : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
_a : str = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
_a : Optional[int] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
_a : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , )
_a : List[str] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
_a : str = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , )
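# The offset arithmetic the assertions above encode, written out directly (an
# illustrative addition; the helper name is ours): for '''hello hello''', the
# second token's start depends on whether trim_offsets drops the leading space
# from its span. (The add_prefix_space variants shift everything right by one.)
def expected_pair_offsets(token: str, trim_offsets: bool):
    n = len(token)
    second_start = n + 1 if trim_offsets else n
    return (0, n), (second_start, n + 1 + n)
assert expected_pair_offsets('''hello''', trim_offsets=True) == ((0, 5), (6, 11))
assert expected_pair_offsets('''hello''', trim_offsets=False) == ((0, 5), (5, 11))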
| 15 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__ = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
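# Tiny usage check for the helper above (an addition): a seeded rng makes the
# output deterministic; shape is (batch, length), values in [0, scale).
_demo = floats_list((2, 3), scale=2.0, rng=random.Random(0))
assert len(_demo) == 2 and len(_demo[0]) == 3
assert all(0.0 <= v < 2.0 for row in _demo for v in row)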
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : str = batch_size
_a : List[str] = min_seq_length
_a : str = max_seq_length
_a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] = spectrogram_length
_a : List[str] = feature_size
_a : List[Any] = num_audio_channels
_a : Tuple = hop_length
_a : Optional[int] = chunk_length
_a : int = sampling_rate
def __lowercase ( self ) -> Union[str, Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = TvltFeatureExtractor
def __lowercase ( self ) -> Dict:
_a : List[str] = TvltFeatureExtractionTester(self )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_a : Dict = self.feature_extraction_class.from_pretrained(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Union[str, Any] = feat_extract_second.to_dict()
_a : Any = dict_first.pop('''mel_filters''' )
_a : int = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
_a : List[str] = self.feature_extraction_class.from_json_file(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Dict = feat_extract_second.to_dict()
_a : str = dict_first.pop('''mel_filters''' )
_a : str = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize feature_extractor
_a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a : Union[str, Any] = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : int = np.asarray(_a )
_a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowercase ( self ) -> int:
_a : Union[str, Any] = self._load_datasamples(1 )
_a : int = TvltFeatureExtractor()
_a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 15 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a__ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = AlbertTokenizer
UpperCAmelCase__ : Any = AlbertTokenizerFast
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Union[str, Any] = True
def __lowercase ( self ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] = AlbertTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self , _a ) -> Optional[int]:
_a : Union[str, Any] = '''this is a test'''
_a : Union[str, Any] = '''this is a test'''
return input_text, output_text
def __lowercase ( self ) -> Tuple:
_a : List[Any] = '''<pad>'''
_a : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
self.assertEqual(len(_a ) , 3_0_0_0_0 )
def __lowercase ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __lowercase ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
_a : int = self.get_tokenizer()
_a : Any = self.get_rust_tokenizer()
_a : Any = '''I was born in 92000, and this is falsé.'''
_a : int = tokenizer.tokenize(_a )
_a : Optional[int] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_a : Dict = tokenizer.encode(_a , add_special_tokens=_a )
_a : Any = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : List[str] = tokenizer.encode(_a )
_a : Union[str, Any] = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Dict = AlbertTokenizer(_a , keep_accents=_a )
_a : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [4_8, 2_5, 2_1, 1_2_8_9] )
_a : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
_a : Tuple = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(_a , [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] )
_a : List[Any] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def __lowercase ( self ) -> List[str]:
_a : Optional[int] = AlbertTokenizer(_a )
_a : str = tokenizer.encode('''sequence builders''' )
_a : Tuple = tokenizer.encode('''multi-sequence build''' )
_a : List[str] = tokenizer.build_inputs_with_special_tokens(_a )
_a : int = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __lowercase ( self ) -> Any:
# fmt: off
_a : Optional[Any] = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
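# Illustrative sketch (an addition; the helper name is ours) of the '''▁'''
# whitespace marker visible in the SentencePiece pieces asserted above:
# concatenating pieces and swapping the marker for a space recovers the text.
def pieces_to_text(pieces):
    return ''''''.join(pieces).replace('''▁''', ''' ''').strip()
assert pieces_to_text(['''▁this''', '''▁is''', '''▁a''', '''▁test''']) == '''this is a test'''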
| 15 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
@add_end_docstrings(
__lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self , _a ) -> np.ndarray:
if self.framework == "tf":
_a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __lowercase ( self , _a ) -> np.ndarray:
_a : int = self.get_masked_index(_a )
_a : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __lowercase ( self , _a ) -> Optional[int]:
if isinstance(_a , _a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]:
if return_tensors is None:
_a : Union[str, Any] = self.framework
_a : str = self.tokenizer(_a , return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[str] = self.model(**_a )
_a : Any = model_inputs['''input_ids''']
return model_outputs
def __lowercase ( self , _a , _a=5 , _a=None ) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_a : List[Any] = target_ids.shape[0]
_a : Any = model_outputs['''input_ids'''][0]
_a : List[str] = model_outputs['''logits''']
if self.framework == "tf":
_a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_a : List[str] = outputs.numpy()
_a : Dict = outputs[0, masked_index, :]
_a : str = stable_softmax(_a , axis=-1 )
if target_ids is not None:
_a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) )
_a : Union[str, Any] = tf.expand_dims(_a , 0 )
_a : Optional[int] = tf.math.top_k(_a , k=_a )
_a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
_a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_a : List[str] = outputs[0, masked_index, :]
_a : List[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_a : List[Any] = probs[..., target_ids]
_a , _a : Optional[Any] = probs.topk(_a )
_a : Dict = []
_a : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_a : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_a : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
_a : Tuple = target_ids[p].tolist()
_a : List[str] = p
# Filter padding out:
_a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a )
_a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
def __lowercase ( self , _a , _a=None ) -> Dict:
if isinstance(_a , _a ):
_a : Tuple = [targets]
try:
_a : int = self.tokenizer.get_vocab()
except Exception:
_a : Any = {}
_a : List[Any] = []
for target in targets:
_a : List[Any] = vocab.get(_a , _a )
if id_ is None:
_a : Tuple = self.tokenizer(
_a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids''']
if len(_a ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
_a : Tuple = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
_a : List[str] = list(set(_a ) )
if len(_a ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
_a : int = np.array(_a )
return target_ids
def __lowercase ( self , _a=None , _a=None ) -> Tuple:
_a : str = {}
if targets is not None:
_a : List[Any] = self.get_target_ids(_a , _a )
_a : Optional[Any] = target_ids
if top_k is not None:
_a : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _a , *_a , **_a ) -> int:
_a : Optional[Any] = super().__call__(_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
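# Minimal numpy sketch (an addition, independent of the pipeline class; the
# function name is ours) of the postprocessing core above: softmax over the
# vocabulary at the masked position, then top-k probabilities and token ids.
def topk_over_mask(logits_at_mask: np.ndarray, k: int = 5):
    probs = np.exp(logits_at_mask - logits_at_mask.max())
    probs = probs / probs.sum()
    ids = np.argsort(-probs)[:k]
    return probs[ids], ids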
| 15 | 1 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = (KDPMaDiscreteScheduler,)
UpperCAmelCase__ : List[str] = 10
def __lowercase ( self , **_a ) -> Optional[int]:
_a : Any = {
'''num_train_timesteps''': 1_1_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_a )
return config
def __lowercase ( self ) -> Union[str, Any]:
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_a )
def __lowercase ( self ) -> Tuple:
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def __lowercase ( self ) -> Optional[int]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def __lowercase ( self ) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def __lowercase ( self ) -> Optional[Any]:
_a : int = self.scheduler_classes[0]
_a : Dict = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a : List[str] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
_a : Optional[Any] = self.dummy_model()
_a : str = self.dummy_sample_deter * scheduler.init_noise_sigma
_a : Dict = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
_a : Tuple = scheduler.scale_model_input(_a , _a )
_a : Dict = model(_a , _a )
_a : str = scheduler.step(_a , _a , _a )
_a : Dict = output.prev_sample
_a : Optional[Any] = torch.sum(torch.abs(_a ) )
_a : Any = torch.mean(torch.abs(_a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0002 ) < 1e-3
def __lowercase ( self ) -> Dict:
if torch_device == "mps":
return
_a : Optional[int] = self.scheduler_classes[0]
_a : int = self.get_scheduler_config()
_a : Optional[int] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
_a : Any = self.dummy_model()
_a : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
_a : Optional[Any] = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
_a : Optional[Any] = scheduler.scale_model_input(_a , _a )
_a : List[Any] = model(_a , _a )
_a : Optional[Any] = scheduler.step(_a , _a , _a )
_a : Optional[Any] = output.prev_sample
_a : Dict = torch.sum(torch.abs(_a ) )
_a : Tuple = torch.mean(torch.abs(_a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
def __lowercase ( self ) -> Dict:
if torch_device == "mps":
return
_a : List[str] = self.scheduler_classes[0]
_a : str = self.get_scheduler_config()
_a : List[Any] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
_a : Union[str, Any] = self.dummy_model()
_a : Tuple = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_a : str = scheduler.scale_model_input(_a , _a )
_a : int = model(_a , _a )
_a : int = scheduler.step(_a , _a , _a )
_a : Dict = output.prev_sample
_a : Tuple = torch.sum(torch.abs(_a ) )
_a : Union[str, Any] = torch.mean(torch.abs(_a ) )
if str(_a ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
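# For reference (an illustrative addition mirroring the config above; the
# helper name is ours): the '''linear''' beta schedule interpolates uniformly
# from beta_start to beta_end over num_train_timesteps.
def linear_betas(beta_start=0.0001, beta_end=0.02, num_train_timesteps=1_100):
    return torch.linspace(beta_start, beta_end, num_train_timesteps)
assert linear_betas().shape == (1_100,)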
| 15 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
a__ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
a__ = logging.getLogger()
def get_setup_file():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    """simple docstring"""
    path = os.path.join(output_dir ,F"""{split}_results.json""" )
    if os.path.exists(path ):
        with open(path ,'''r''' ) as f:
            return json.load(f )
    raise ValueError(F"""can't find {path}""" )
a__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
_a : Any = self.get_auto_remove_tmp_dir()
_a : Optional[Any] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_glue.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __lowercase ( self ) -> Dict:
_a : Tuple = self.get_auto_remove_tmp_dir()
_a : Tuple = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_clm_flax.main()
_a : List[str] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
def __lowercase ( self ) -> Optional[int]:
_a : str = self.get_auto_remove_tmp_dir()
_a : Optional[int] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(_a , '''argv''' , _a ):
run_summarization_flax.main()
_a : Optional[int] = get_results(_a , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
| 15 | 1 |
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate an integer expression given in postfix (reverse Polish) notation."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division that truncates towards zero, like C-style "/".
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
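    # Illustrative only: evaluate "(2 + 1) * 3" given in postfix form
    # (function and parameter names above are restored from the body's references).
    print(evaluate_postfix(["2", "1", "+", "3", "*"]))  # prints 9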
| 15 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file(fname, version, pattern):
    """Update the version of transformers in one file, using a pattern from REPLACE_PATTERNS."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version pinned in all maintained example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere it is hardcoded in the repo."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the main docs with links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps: bump the version, then update examples and README."""
    # First let's get the default version: base version if we are in dev, bump of the minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
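# Illustrative only, assuming `packaging`'s semantics: parse("4.31.0.dev0").base_version
# is "4.31.0"; a patch release from 4.30.2 proposes "4.30.3", a minor release "4.31.0".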
def post_release_work():
    """Do all the necessary post-release steps: move back to a dev version."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 15 | 1 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of shards, i.e. the common length of the lists in gen_kwargs."""
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Split range(num_shards) into at most max_num_jobs contiguous groups of near-equal size."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
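# Illustrative only: _distribute_shards(num_shards=5, max_num_jobs=3) returns
# [range(0, 2), range(2, 4), range(4, 5)] -- earlier groups absorb the remainder.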
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into at most max_num_jobs pieces, sharding every list it contains."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Merge a list of sharded gen_kwargs back into one, concatenating the lists."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle every list in gen_kwargs, using the same permutation for lists of equal size."""
    # Lists of the same size must share one permutation so that entangled sources
    # (e.g. shards and their metadata) stay aligned after shuffling.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
| 15 |
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number, computed iteratively."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
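# Illustrative only: F(12) = 144 is the first Fibonacci number with 3 digits,
# so fibonacci_digits_index(3) == 12.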
def solution(n: int = 1000) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 15 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """Wraps a BridgeTower image processor and a Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        # do_normalize/do_center_crop literals are an assumption; the source only
        # shows that two keyword values were passed here.
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 15 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    """Compute per-question exact match and F1 for the MultiRC task."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
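# Illustrative only (hypothetical inputs): for a single question with two answer
# candidates, predictions [1, 0] vs labels [1, 1] give exact_match == 0.0, because
# the question's full answer set must match, while the pair-level f1_a is ~0.67.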
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    """SuperGLUE metric suite, one configuration per SuperGLUE task."""

    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 15 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 15 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Find the largest eigenvalue and corresponding eigenvector of input_matrix."""
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
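# Illustrative only (hypothetical inputs): for the symmetric matrix [[2, 1], [1, 2]]
# and starting vector [1, 0], power_iteration converges to the dominant eigenvalue 3,
# with eigenvector proportional to [1, 1] / sqrt(2).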
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 15 | 1 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if number and number + 2 form a twin prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
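# Illustrative only (the function name above is restored): twin_prime(5) == 7 since
# (5, 7) is a twin prime pair, while twin_prime(4) == -1 because 4 is not prime.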
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
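# Illustrative only (hypothetical file paths): in the datasets library this builder
# backs calls such as load_dataset("pandas", data_files={"train": "train.pkl"}),
# where each data file is a pickled pandas DataFrame.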
| 15 | 1 |