"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
A__ = OmegaConf.load(UpperCAmelCase_ )
A__ = torch.load(UpperCAmelCase_ , map_location="""cpu""" )["""model"""]
A__ = list(state_dict.keys() )
# extract state_dict for VQVAE
A__ = {}
A__ = """first_stage_model."""
for key in keys:
if key.startswith(UpperCAmelCase_ ):
A__ = state_dict[key]
# extract state_dict for UNetLDM
A__ = {}
A__ = """model.diffusion_model."""
for key in keys:
if key.startswith(UpperCAmelCase_ ):
A__ = state_dict[key]
A__ = config.model.params.first_stage_config.params
A__ = config.model.params.unet_config.params
A__ = VQModel(**UpperCAmelCase_ ).eval()
vqvae.load_state_dict(UpperCAmelCase_ )
A__ = UNetLDMModel(**UpperCAmelCase_ ).eval()
unet.load_state_dict(UpperCAmelCase_ )
A__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCAmelCase_ , )
A__ = LDMPipeline(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
pipeline.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
SCREAMING_SNAKE_CASE_ : Optional[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
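# A minimal usage sketch (the script name and paths below are illustrative, not
# fixed by this file):
#
#   python conversion_script.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ldm_pipeline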
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'MobileNetV1Config'
# Base docstring
SCREAMING_SNAKE_CASE_ : str = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : List[str] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : Tuple = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=None ):
A__ = {}
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = model.mobilenet_va
else:
A__ = model
A__ = """MobilenetV1/Conv2d_0/"""
A__ = backbone.conv_stem.convolution.weight
A__ = backbone.conv_stem.normalization.bias
A__ = backbone.conv_stem.normalization.weight
A__ = backbone.conv_stem.normalization.running_mean
A__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
A__ = i + 1
A__ = i * 2
A__ = backbone.layer[pt_index]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
A__ = backbone.layer[pt_index + 1]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
A__ = model.classifier.weight
A__ = model.classifier.bias
return tf_to_pt_map
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
A__ = tf.train.list_variables(UpperCAmelCase_ )
A__ = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
A__ = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = array
# Build TF to PyTorch weights loading map
A__ = _build_tf_to_pytorch_map(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
A__ = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
A__ = np.transpose(UpperCAmelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
A__ = array.squeeze().transpose()
else:
A__ = np.transpose(UpperCAmelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
A__ = torch.from_numpy(UpperCAmelCase_ )
tf_weights.pop(UpperCAmelCase_ , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp""" , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp_1""" , UpperCAmelCase_ )
tf_weights.pop(name + """/ExponentialMovingAverage""" , UpperCAmelCase_ )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def _snake_case ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : nn.Convad ):
A__ , A__ = features.shape[-2:]
A__ , A__ = conv_layer.stride
A__ , A__ = conv_layer.kernel_size
if in_height % stride_height == 0:
A__ = max(kernel_height - stride_height , 0 )
else:
A__ = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
A__ = max(kernel_width - stride_width , 0 )
else:
A__ = max(kernel_width - (in_width % stride_width) , 0 )
A__ = pad_along_width // 2
A__ = pad_along_width - pad_left
A__ = pad_along_height // 2
A__ = pad_along_height - pad_top
A__ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(UpperCAmelCase_ , UpperCAmelCase_ , """constant""" , 0.0 )
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: bool = False , UpperCamelCase: Optional[bool] = True , UpperCamelCase: Optional[bool or str] = True , ):
"""simple docstring"""
super().__init__()
A__ = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
A__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
A__ = nn.Convad(
in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=UpperCamelCase , stride=UpperCamelCase , padding=UpperCamelCase , groups=UpperCamelCase , bias=UpperCamelCase , padding_mode="""zeros""" , )
if use_normalization:
A__ = nn.BatchNormad(
num_features=UpperCamelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCamelCase , track_running_stats=UpperCamelCase , )
else:
A__ = None
if use_activation:
if isinstance(UpperCamelCase , UpperCamelCase ):
A__ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCamelCase ):
A__ = ACTaFN[config.hidden_act]
else:
A__ = config.hidden_act
else:
A__ = None
def UpperCamelCase ( self: List[Any] , UpperCamelCase: torch.Tensor ):
"""simple docstring"""
if self.config.tf_padding:
A__ = apply_tf_padding(UpperCamelCase , self.convolution )
A__ = self.convolution(UpperCamelCase )
if self.normalization is not None:
A__ = self.normalization(UpperCamelCase )
if self.activation is not None:
A__ = self.activation(UpperCamelCase )
return features
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = MobileNetVaConfig
UpperCAmelCase = load_tf_weights_in_mobilenet_va
UpperCAmelCase = "mobilenet_v1"
UpperCAmelCase = "pixel_values"
UpperCAmelCase = False
def UpperCamelCase ( self: Any , UpperCamelCase: Union[nn.Linear, nn.Convad] ):
"""simple docstring"""
if isinstance(UpperCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: bool = True ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config
A__ = 32
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
A__ = MobileNetVaConvLayer(
UpperCamelCase , in_channels=config.num_channels , out_channels=UpperCamelCase , kernel_size=3 , stride=2 , )
A__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
A__ = nn.ModuleList()
for i in range(13 ):
A__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=1 , ) )
A__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A__ = self.conv_stem(UpperCamelCase )
A__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
A__ = layer_module(UpperCamelCase )
if output_hidden_states:
A__ = all_hidden_states + (hidden_states,)
A__ = hidden_states
if self.pooler is not None:
A__ = torch.flatten(self.pooler(UpperCamelCase ) , start_dim=1 )
else:
A__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=UpperCamelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = MobileNetVaModel(UpperCamelCase )
A__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
A__ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.mobilenet_va(UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier(self.dropout(UpperCamelCase ) )
A__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ = """single_label_classification"""
else:
A__ = """multi_label_classification"""
if self.config.problem_type == "regression":
A__ = MSELoss()
if self.num_labels == 1:
A__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ = BCEWithLogitsLoss()
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
if not return_dict:
A__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , )
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _snake_case ( ):
A__ = HfArgumentParser(UpperCAmelCase_ )
A__ = parser.parse_args_into_dataclasses()[0]
A__ = TensorFlowBenchmark(args=UpperCAmelCase_ )
try:
A__ = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
A__ = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
A__ = """ """.join(str(UpperCAmelCase_ ).split(""" """ )[:-1] )
A__ = """"""
A__ = eval(str(UpperCAmelCase_ ).split(""" """ )[-1] )
A__ = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
A__ = full_error_msg + begin_error_msg + str(UpperCAmelCase_ )
raise ValueError(UpperCAmelCase_ )
benchmark.run()
if __name__ == "__main__":
main()
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : list[list[int]] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : set ):
A__ , A__ = len(UpperCAmelCase_ ), len(grid[0] )
if (
min(UpperCAmelCase_ , UpperCAmelCase_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A__ = 0
count += depth_first_search(UpperCAmelCase_ , row + 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , row - 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col + 1 , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col - 1 , UpperCAmelCase_ )
visit.remove((row, col) )
return count
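# An illustrative call (made-up grid): a fully open 2x2 grid has exactly two
# simple paths from (0, 0) to (1, 1), right-then-down and down-then-right:
#   depth_first_search([[0, 0], [0, 0]], 0, 0, set())  # -> 2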
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def _snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any]=7 ):
A__ = None
if token is not None:
A__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
A__ = """636036"""
A__ = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
A__ = requests.get(UpperCAmelCase_ , headers=UpperCAmelCase_ ).json()
return result["workflow_runs"]
def _snake_case ( UpperCAmelCase_ : Optional[int] ):
A__ = get_daily_ci_runs(UpperCAmelCase_ )
A__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
A__ = workflow_run["""id"""]
break
return workflow_run_id
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ):
A__ = get_last_daily_ci_runs(UpperCAmelCase_ )
if workflow_run_id is not None:
A__ = get_artifacts_links(worflow_run_id=UpperCAmelCase_ , token=UpperCAmelCase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
A__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=UpperCAmelCase_ , artifact_url=UpperCAmelCase_ , output_dir=UpperCAmelCase_ , token=UpperCAmelCase_ )
def _snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : str ):
get_last_daily_ci_artifacts(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
A__ = {}
for artifact_name in artifact_names:
A__ = os.path.join(UpperCAmelCase_ , F"""{artifact_name}.zip""" )
if os.path.isfile(UpperCAmelCase_ ):
A__ = {}
with zipfile.ZipFile(UpperCAmelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCAmelCase_ ):
# read the file
with z.open(UpperCAmelCase_ ) as f:
A__ = f.read().decode("""UTF-8""" )
return results
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : int = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Tuple = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Tuple = ['DPTFeatureExtractor']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Dict = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int ):
A__ = _distribute_shards(**UpperCAmelCase_ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
A__ = _split_gen_kwargs(UpperCAmelCase_ , UpperCAmelCase_ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def _snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ):
if expected is RuntimeError:
with pytest.raises(UpperCAmelCase_ ):
_number_of_shards_in_gen_kwargs(UpperCAmelCase_ )
else:
A__ = _number_of_shards_in_gen_kwargs(UpperCAmelCase_ )
assert out == expected
"""simple docstring"""
import math
def _snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ):
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(UpperCAmelCase_ ) ) ** 2)
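# A quick sanity check (illustrative values): cos(60°)² = 0.25, so
#   malus_law(100.0, 60.0)  # -> 25.0, up to floating-point rounding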
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE_ : str = parse(importlib.metadata.version('torch'))
def _snake_case ( UpperCAmelCase_ : Union[str, Version] , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
A__ = STR_OPERATION_TO_FUNC[operation]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = parse(importlib.metadata.version(UpperCAmelCase_ ) )
return operation(UpperCAmelCase_ , parse(UpperCAmelCase_ ) )
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
return compare_versions(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
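# A small usage sketch (illustrative; assumes `STR_OPERATION_TO_FUNC` maps
# operation strings such as ">=" or "<" to the matching `operator` functions):
#
#   is_torch_version(">=", "1.12.0")         # True iff the installed torch is >= 1.12.0
#   compare_versions("numpy", "<", "2.0.0")  # compares the installed numpy version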
"""simple docstring"""
# flake8: noqa
# Lint as: python3
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
    title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
    month = sep,
    year = "2015",
    address = "Lisbon, Portugal",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W15-3049",
    doi = "10.18653/v1/W15-3049",
    pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
    title = "chr{F}++: words helping character n-grams",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Second Conference on Machine Translation",
    month = sep,
    year = "2017",
    address = "Copenhagen, Denmark",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W17-4770",
    doi = "10.18653/v1/W17-4770",
    pages = "612--618",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""

_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.

Args:
    predictions (list of str): The predicted sentences.
    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
    char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.

Returns:
    'score' (float): The chrF (chrF++) score,
    'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
    'beta' (int): Determine the importance of recall w.r.t precision

Examples:
    Example 1--a simple example of calculating chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction, references=reference)
        >>> print(results)
        {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}

    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                         references=reference,
        ...                         word_order=2)
        >>> print(results)
        {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}

    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                         references=reference,
        ...                         word_order=2,
        ...                         lowercase=True)
        >>> print(results)
        {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self: List[str] , UpperCamelCase: Optional[int] , UpperCamelCase: int=13 , UpperCamelCase: List[str]=7 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=True , UpperCamelCase: List[str]=True , UpperCamelCase: int=True , UpperCamelCase: List[Any]=99 , UpperCamelCase: List[str]=32 , UpperCamelCase: int=5 , UpperCamelCase: Optional[int]=4 , UpperCamelCase: List[str]=37 , UpperCamelCase: str="gelu" , UpperCamelCase: str=0.1 , UpperCamelCase: Tuple=0.1 , UpperCamelCase: str=5_12 , UpperCamelCase: int=16 , UpperCamelCase: int=2 , UpperCamelCase: Tuple=0.02 , UpperCamelCase: Any=4 , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_attention_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_choices
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_attention_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ = config_and_inputs
A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = True
UpperCAmelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = FlaxRoFormerModelTester(self )
@slow
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
A__ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCamelCase )
A__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase )
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
A__ = jnp.array([[0, 1, 2, 3, 4, 5]] )
A__ = model(UpperCamelCase )[0]
A__ = 5_00_00
A__ = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase )
A__ = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase , atol=1e-4 ) )
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Optional[int] , *UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple=None , UpperCamelCase: Tuple=None , **UpperCamelCase: Dict ):
"""simple docstring"""
super().__init__(*UpperCamelCase , **UpperCamelCase )
A__ = eval_examples
A__ = post_process_function
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Optional[Dataset] = None , UpperCamelCase: List[Any]=None , UpperCamelCase: Optional[List[str]] = None , UpperCamelCase: str = "eval" , **UpperCamelCase: Optional[int] , ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
A__ = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
A__ = gen_kwargs
A__ = self.eval_dataset if eval_dataset is None else eval_dataset
A__ = self.get_eval_dataloader(UpperCamelCase )
A__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
else:
A__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase )
return metrics
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: Dict=None , UpperCamelCase: str = "test" , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = self.get_test_dataloader(UpperCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase , """predict""" )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase )
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
SCREAMING_SNAKE_CASE_ : Dict = Lock()
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(UpperCAmelCase_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
A__ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
A__ = min(UpperCAmelCase_ , UpperCAmelCase_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(UpperCAmelCase_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
A__ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
A__ = max(UpperCAmelCase_ , UpperCAmelCase_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(UpperCAmelCase_ )
def _snake_case ( UpperCAmelCase_ : Optional[Any] ):
A__ = []
A__ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
A__ = Pipe()
A__ = Pipe()
process_array_.append(
Process(
target=UpperCAmelCase_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
A__ = temp_rs
A__ = temp_rr
for i in range(1 , len(UpperCAmelCase_ ) - 1 ):
A__ = Pipe()
A__ = Pipe()
process_array_.append(
Process(
target=UpperCAmelCase_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
A__ = temp_rs
A__ = temp_rr
process_array_.append(
Process(
target=UpperCAmelCase_ , args=(
len(UpperCAmelCase_ ) - 1,
arr[len(UpperCAmelCase_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(UpperCAmelCase_ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(UpperCAmelCase_ ) ):
A__ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def _snake_case ( ):
A__ = list(range(10 , 0 , -1 ) )
print("""Initial List""" )
print(*UpperCAmelCase_ )
A__ = odd_even_transposition(UpperCAmelCase_ )
print("""Sorted List\n""" )
print(*UpperCAmelCase_ )
if __name__ == "__main__":
main()
"""simple docstring"""
class a :
"""simple docstring"""
def __init__( self: Dict ):
"""simple docstring"""
A__ = {}
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
print(self.vertex )
for i in self.vertex:
print(UpperCamelCase , """ -> """ , """ -> """.join([str(UpperCamelCase ) for j in self.vertex[i]] ) )
def UpperCamelCase ( self: Any , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCamelCase )
else:
# else make a new vertex
A__ = [to_vertex]
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: str , UpperCamelCase: int , UpperCamelCase: list ):
"""simple docstring"""
A__ = True
print(UpperCamelCase , end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
SCREAMING_SNAKE_CASE_ : List[str] = logging.getLogger(__name__)
class a :
"""simple docstring"""
def __init__( self: List[str] ):
"""simple docstring"""
A__ = False
def UpperCamelCase ( self: str , UpperCamelCase: int , UpperCamelCase: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
if not self.initialized:
A__ = RagRetriever(
UpperCamelCase , question_encoder_tokenizer=UpperCamelCase , generator_tokenizer=UpperCamelCase , index=UpperCamelCase , init_retrieval=UpperCamelCase , )
A__ = True
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
self.retriever.index.init_index()
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] ):
"""simple docstring"""
A__ , A__ = self.retriever._main_retrieve(UpperCamelCase , UpperCamelCase )
return doc_ids, retrieved_doc_embeds
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Optional[int] , UpperCamelCase: int , UpperCamelCase: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Dict , UpperCamelCase: List[Any]=None ):
"""simple docstring"""
if index is not None and index.is_initialized() and len(UpperCamelCase ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
UpperCamelCase , question_encoder_tokenizer=UpperCamelCase , generator_tokenizer=UpperCamelCase , index=UpperCamelCase , init_retrieval=UpperCamelCase , )
A__ = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
for worker in self.retrieval_workers
] )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: str ):
"""simple docstring"""
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
A__ = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
A__ , A__ = ray.get(random_worker.retrieve.remote(UpperCamelCase , UpperCamelCase ) )
else:
A__ , A__ = self._main_retrieve(UpperCamelCase , UpperCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase )
@classmethod
def UpperCamelCase ( cls: List[str] , UpperCamelCase: Any , UpperCamelCase: Any=None , **UpperCamelCase: List[Any] ):
"""simple docstring"""
return super(UpperCamelCase , cls ).get_tokenizers(UpperCamelCase , UpperCamelCase , **UpperCamelCase )
@classmethod
def UpperCamelCase ( cls: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: Tuple=None , **UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = kwargs.pop("""config""" , UpperCamelCase ) or RagConfig.from_pretrained(UpperCamelCase , **UpperCamelCase )
A__ = RagTokenizer.from_pretrained(UpperCamelCase , config=UpperCamelCase )
A__ = rag_tokenizer.question_encoder
A__ = rag_tokenizer.generator
if indexed_dataset is not None:
A__ = """custom"""
A__ = CustomHFIndex(config.retrieval_vector_size , UpperCamelCase )
else:
A__ = cls._build_index(UpperCamelCase )
return cls(
UpperCamelCase , question_encoder_tokenizer=UpperCamelCase , generator_tokenizer=UpperCamelCase , retrieval_workers=UpperCamelCase , index=UpperCamelCase , )
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int = 10 ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or n < 0:
raise ValueError("""Invalid input""" )
A__ = 10**n
A__ = 2_8433 * (pow(2 , 783_0457 , UpperCAmelCase_ )) + 1
return str(number % modulus )
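# Illustrative check: solution() yields the last ten digits of
# 28433 * 2**7830457 + 1 (the widely published answer is 8739992577).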
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int = 400_0000 ):
A__ = [0, 1]
A__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
A__ = 0
for j in range(len(UpperCAmelCase_ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
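# Illustrative check: with n=10 the generated terms are 0, 1, 1, 2, 3, 5, 8, 13,
# and the even ones not exceeding 10 sum to 2 + 8 = 10, i.e. solution(10) == 10.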
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad ):
A__ = end_pointa[0] - end_pointa[0]
A__ = end_pointa[1] - end_pointa[1]
A__ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : Vectorad ):
A__ = ab[1] * ac[2] - ab[2] * ac[1] # *i
A__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
A__ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : int ):
return tuple(round(UpperCAmelCase_ , UpperCAmelCase_ ) for x in vector ) == (0, 0, 0)
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : int = 10 ):
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
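# Two illustrative calls (made-up points):
#   are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))  # -> True, cross product is (0, 0, 0)
#   are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3))  # -> False, cross product is (1, -2, 1)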
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int | float | str ):
try:
A__ = float(UpperCAmelCase_ )
except ValueError:
raise ValueError("""Please enter a valid number""" )
A__ = decimal - int(UpperCAmelCase_ )
if fractional_part == 0:
return int(UpperCAmelCase_ ), 1
else:
A__ = len(str(UpperCAmelCase_ ).split(""".""" )[1] )
A__ = int(decimal * (10**number_of_frac_digits) )
A__ = 10**number_of_frac_digits
A__ , A__ = denominator, numerator
while True:
A__ = dividend % divisor
if remainder == 0:
break
A__ , A__ = divisor, remainder
A__ , A__ = numerator / divisor, denominator / divisor
return int(UpperCAmelCase_ ), int(UpperCAmelCase_ )
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction('67') = }""")
print(f"""{decimal_to_fraction('45.0') = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction('6.25') = }""")
print(f"""{decimal_to_fraction('78td') = }""")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = ["vqvae"]
def __init__( self: List[str] , UpperCamelCase: AutoencoderKL , UpperCamelCase: UNetaDConditionModel , UpperCamelCase: Mel , UpperCamelCase: Union[DDIMScheduler, DDPMScheduler] , ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=UpperCamelCase , scheduler=UpperCamelCase , mel=UpperCamelCase , vqvae=UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
return 50 if isinstance(self.scheduler , UpperCamelCase ) else 10_00
@torch.no_grad()
def __call__( self: List[Any] , UpperCamelCase: int = 1 , UpperCamelCase: str = None , UpperCamelCase: np.ndarray = None , UpperCamelCase: int = 0 , UpperCamelCase: int = 0 , UpperCamelCase: int = None , UpperCamelCase: torch.Generator = None , UpperCamelCase: float = 0 , UpperCamelCase: float = 0 , UpperCamelCase: torch.Generator = None , UpperCamelCase: float = 0 , UpperCamelCase: torch.Tensor = None , UpperCamelCase: torch.Tensor = None , UpperCamelCase: Optional[Any]=True , ):
"""simple docstring"""
A__ = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCamelCase )
A__ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
A__ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
A__ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCamelCase , device=self.device , )
A__ = noise
A__ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCamelCase , UpperCamelCase )
A__ = self.mel.audio_slice_to_image(UpperCamelCase )
A__ = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
A__ = (input_image / 2_55) * 2 - 1
A__ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
A__ = self.vqvae.encode(torch.unsqueeze(UpperCamelCase , 0 ) ).latent_dist.sample(
generator=UpperCamelCase )[0]
A__ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
A__ = self.scheduler.add_noise(UpperCamelCase , UpperCamelCase , self.scheduler.timesteps[start_step - 1] )
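            # translate the masked window from seconds into spectrogram pixel columns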
A__ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
A__ = int(mask_start_secs * pixels_per_second )
A__ = int(mask_end_secs * pixels_per_second )
A__ = self.scheduler.add_noise(UpperCamelCase , UpperCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UpperCamelCase ):
A__ = self.unet(UpperCamelCase , UpperCamelCase , UpperCamelCase )["""sample"""]
else:
A__ = self.unet(UpperCamelCase , UpperCamelCase )["""sample"""]
if isinstance(self.scheduler , UpperCamelCase ):
A__ = self.scheduler.step(
model_output=UpperCamelCase , timestep=UpperCamelCase , sample=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , )["""prev_sample"""]
else:
A__ = self.scheduler.step(
model_output=UpperCamelCase , timestep=UpperCamelCase , sample=UpperCamelCase , generator=UpperCamelCase , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
A__ = mask[:, step, :, :mask_start]
if mask_end > 0:
A__ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
A__ = 1 / self.vqvae.config.scaling_factor * images
A__ = self.vqvae.decode(UpperCamelCase )["""sample"""]
A__ = (images / 2 + 0.5).clamp(0 , 1 )
A__ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
A__ = (images * 2_55).round().astype("""uint8""" )
A__ = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCamelCase , mode="""RGB""" ).convert("""L""" ) for _ in images) )
A__ = [self.mel.image_to_audio(UpperCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCamelCase ) )
@torch.no_grad()
def UpperCamelCase ( self: int , UpperCamelCase: List[Image.Image] , UpperCamelCase: int = 50 ):
"""simple docstring"""
assert isinstance(self.scheduler , UpperCamelCase )
self.scheduler.set_timesteps(UpperCamelCase )
A__ = np.array(
[np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
A__ = (sample / 2_55) * 2 - 1
A__ = torch.Tensor(UpperCamelCase ).to(self.device )
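        # walk the timesteps in reverse, inverting the DDIM update to recover the noise that reproduces this image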
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
A__ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
A__ = self.scheduler.alphas_cumprod[t]
A__ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
A__ = 1 - alpha_prod_t
A__ = self.unet(UpperCamelCase , UpperCamelCase )["""sample"""]
A__ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
A__ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
A__ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCamelCase ( UpperCamelCase: torch.Tensor , UpperCamelCase: torch.Tensor , UpperCamelCase: float ):
"""simple docstring"""
A__ = acos(torch.dot(torch.flatten(UpperCamelCase ) , torch.flatten(UpperCamelCase ) ) / torch.norm(UpperCamelCase ) / torch.norm(UpperCamelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(UpperCamelCase ) + sin(alpha * theta ) * xa / sin(UpperCamelCase )
| 335 |
"""simple docstring"""
import math
class a :
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: List[str]=0 ): # a graph with Node 0,1,...,N-1
"""simple docstring"""
A__ = n
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # adjacency matrix for weight
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = w
def UpperCamelCase ( self: int ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A__ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase ( self: int , UpperCamelCase: List[str] , UpperCamelCase: Dict ):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    # show_min returns the distance, so print the result to make the demo visible
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
| 335 | 1 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = BloomTokenizerFast
UpperCAmelCase = BloomTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = "tokenizer_file"
UpperCAmelCase = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
super().setUp()
A__ = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: Tuple ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.get_rust_tokenizer()
A__ = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
A__ = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
A__ = tokenizer.batch_encode_plus(UpperCamelCase )["""input_ids"""]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: List[Any]=6 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
A__ = """This is a simple input"""
A__ = ["""This is a simple input 1""", """This is a simple input 2"""]
A__ = ("""This is a simple input""", """This is a pair""")
A__ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCamelCase , max_length=UpperCamelCase )
tokenizer_r.encode_plus(UpperCamelCase , max_length=UpperCamelCase )
tokenizer_r.batch_encode_plus(UpperCamelCase , max_length=UpperCamelCase )
tokenizer_r.encode(UpperCamelCase , max_length=UpperCamelCase )
tokenizer_r.batch_encode_plus(UpperCamelCase , max_length=UpperCamelCase )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
A__ = None # Hotfixing padding = None
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="""max_length""" , )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_rust_tokenizer()
A__ = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=UpperCamelCase )
A__ = next(iter(UpperCamelCase ) )["""premise"""] # pick up one data
A__ = list(sample_data.values() )
A__ = list(map(tokenizer.encode , UpperCamelCase ) )
A__ = [tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase ) for x in output_tokens]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 335 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = (3, 32, 1_28)
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
A__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
A__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
A__ = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 335 | 1 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Any = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "align_text_model"
def __init__( self: List[Any] , UpperCamelCase: Optional[Any]=3_05_22 , UpperCamelCase: Dict=7_68 , UpperCamelCase: Tuple=12 , UpperCamelCase: Optional[int]=12 , UpperCamelCase: int=30_72 , UpperCamelCase: Dict="gelu" , UpperCamelCase: Optional[int]=0.1 , UpperCamelCase: Any=0.1 , UpperCamelCase: Union[str, Any]=5_12 , UpperCamelCase: List[Any]=2 , UpperCamelCase: Dict=0.02 , UpperCamelCase: Any=1e-1_2 , UpperCamelCase: Optional[Any]=0 , UpperCamelCase: Optional[Any]="absolute" , UpperCamelCase: str=True , **UpperCamelCase: Optional[Any] , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = pad_token_id
@classmethod
def UpperCamelCase ( cls: Optional[int] , UpperCamelCase: Union[str, os.PathLike] , **UpperCamelCase: Any ):
"""simple docstring"""
cls._set_token_in_kwargs(UpperCamelCase )
A__ , A__ = cls.get_config_dict(UpperCamelCase , **UpperCamelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
A__ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase , **UpperCamelCase )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "align_vision_model"
def __init__( self: List[Any] , UpperCamelCase: int = 3 , UpperCamelCase: int = 6_00 , UpperCamelCase: float = 2.0 , UpperCamelCase: float = 3.1 , UpperCamelCase: int = 8 , UpperCamelCase: List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCamelCase: List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , UpperCamelCase: List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , UpperCamelCase: List[int] = [] , UpperCamelCase: List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCamelCase: List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCamelCase: List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCamelCase: float = 0.25 , UpperCamelCase: str = "swish" , UpperCamelCase: int = 25_60 , UpperCamelCase: str = "mean" , UpperCamelCase: float = 0.02 , UpperCamelCase: float = 0.001 , UpperCamelCase: float = 0.99 , UpperCamelCase: float = 0.2 , **UpperCamelCase: Any , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
A__ = num_channels
A__ = image_size
A__ = width_coefficient
A__ = depth_coefficient
A__ = depth_divisor
A__ = kernel_sizes
A__ = in_channels
A__ = out_channels
A__ = depthwise_padding
A__ = strides
A__ = num_block_repeats
A__ = expand_ratios
A__ = squeeze_expansion_ratio
A__ = hidden_act
A__ = hidden_dim
A__ = pooling_type
A__ = initializer_range
A__ = batch_norm_eps
A__ = batch_norm_momentum
A__ = drop_connect_rate
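        # the *4 below counts the sub-layers of each repeated block (expansion, depthwise conv, squeeze-excite, projection)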
A__ = sum(UpperCamelCase ) * 4
@classmethod
def UpperCamelCase ( cls: int , UpperCamelCase: Union[str, os.PathLike] , **UpperCamelCase: List[str] ):
"""simple docstring"""
cls._set_token_in_kwargs(UpperCamelCase )
A__ , A__ = cls.get_config_dict(UpperCamelCase , **UpperCamelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
A__ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase , **UpperCamelCase )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "align"
UpperCAmelCase = True
def __init__( self: Dict , UpperCamelCase: List[Any]=None , UpperCamelCase: str=None , UpperCamelCase: Any=6_40 , UpperCamelCase: List[str]=1.0 , UpperCamelCase: int=0.02 , **UpperCamelCase: Any , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
if text_config is None:
A__ = {}
logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
if vision_config is None:
A__ = {}
logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
A__ = AlignTextConfig(**UpperCamelCase )
A__ = AlignVisionConfig(**UpperCamelCase )
A__ = projection_dim
A__ = temperature_init_value
A__ = initializer_range
@classmethod
def UpperCamelCase ( cls: Optional[Any] , UpperCamelCase: AlignTextConfig , UpperCamelCase: AlignVisionConfig , **UpperCamelCase: int ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = copy.deepcopy(self.__dict__ )
A__ = self.text_config.to_dict()
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
| 335 |
"""simple docstring"""
import math
def _snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ):
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(UpperCAmelCase_ ) ) ** 2)
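# Worked example: a 100-unit beam hitting a polarizer at 60 degrees transmits
# 100 * cos(60 degrees) ** 2 = 100 * 0.25 = 25 units.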
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 335 | 1 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _snake_case ( UpperCAmelCase_ : List[Any] ):
for param in module.parameters():
A__ = False
def _snake_case ( ):
A__ = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
A__ = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = plt.imshow(UpperCAmelCase_ )
fig.axes.get_xaxis().set_visible(UpperCAmelCase_ )
fig.axes.get_yaxis().set_visible(UpperCAmelCase_ )
plt.show()
def _snake_case ( ):
A__ = datetime.now()
A__ = current_time.strftime("""%H:%M:%S""" )
return timestamp
| 335 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
def extract(*UpperCamelCase: List[str] , **UpperCamelCase: Any ):
class a :
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = torch.ones([0] )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(UpperCamelCase )
return self
return Out()
return extract
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
A__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , )
A__ = output.images
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
# put models in fp16
A__ = unet.half()
A__ = vae.half()
A__ = bert.half()
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = init_image.resize((7_60, 5_04) )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
A__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A__ = init_image.resize((7_68, 5_12) )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 335 | 1 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Optional[int] , *UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple=None , UpperCamelCase: Tuple=None , **UpperCamelCase: Dict ):
"""simple docstring"""
super().__init__(*UpperCamelCase , **UpperCamelCase )
A__ = eval_examples
A__ = post_process_function
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Optional[Dataset] = None , UpperCamelCase: List[Any]=None , UpperCamelCase: Optional[List[str]] = None , UpperCamelCase: str = "eval" , **UpperCamelCase: Optional[int] , ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
A__ = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
A__ = gen_kwargs
A__ = self.eval_dataset if eval_dataset is None else eval_dataset
A__ = self.get_eval_dataloader(UpperCamelCase )
A__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
else:
A__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase )
return metrics
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: Dict=None , UpperCamelCase: str = "test" , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = self.get_test_dataloader(UpperCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase , """predict""" )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase )
| 335 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = 0.01
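    # the file is locked below, so a second acquire with this short timeout must raise Timeout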
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
A__ = time.time()
locka.acquire(UpperCAmelCase_ )
assert time.time() - _start > timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = """a""" * 1000 + """.lock"""
A__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
locka.acquire(0 )
| 335 | 1 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'MobileNetV1Config'
# Base docstring
SCREAMING_SNAKE_CASE_ : str = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : List[str] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : Tuple = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=None ):
A__ = {}
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = model.mobilenet_va
else:
A__ = model
A__ = """MobilenetV1/Conv2d_0/"""
A__ = backbone.conv_stem.convolution.weight
A__ = backbone.conv_stem.normalization.bias
A__ = backbone.conv_stem.normalization.weight
A__ = backbone.conv_stem.normalization.running_mean
A__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
A__ = i + 1
A__ = i * 2
A__ = backbone.layer[pt_index]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
A__ = backbone.layer[pt_index + 1]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
A__ = model.classifier.weight
A__ = model.classifier.bias
return tf_to_pt_map
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
A__ = tf.train.list_variables(UpperCAmelCase_ )
A__ = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
A__ = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = array
# Build TF to PyTorch weights loading map
A__ = _build_tf_to_pytorch_map(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
A__ = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
A__ = np.transpose(UpperCAmelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
A__ = array.squeeze().transpose()
else:
A__ = np.transpose(UpperCAmelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
A__ = torch.from_numpy(UpperCAmelCase_ )
tf_weights.pop(UpperCAmelCase_ , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp""" , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp_1""" , UpperCAmelCase_ )
tf_weights.pop(name + """/ExponentialMovingAverage""" , UpperCAmelCase_ )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def _snake_case ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : nn.Convad ):
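    # emulate TensorFlow's "SAME" padding: pad so the output covers ceil(input / stride) positions,
    # placing any odd leftover pixel on the bottom/right edge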
A__ , A__ = features.shape[-2:]
A__ , A__ = conv_layer.stride
A__ , A__ = conv_layer.kernel_size
if in_height % stride_height == 0:
A__ = max(kernel_height - stride_height , 0 )
else:
A__ = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
A__ = max(kernel_width - stride_width , 0 )
else:
A__ = max(kernel_width - (in_width % stride_width) , 0 )
A__ = pad_along_width // 2
A__ = pad_along_width - pad_left
A__ = pad_along_height // 2
A__ = pad_along_height - pad_top
A__ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(UpperCAmelCase_ , UpperCAmelCase_ , """constant""" , 0.0 )
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: bool = False , UpperCamelCase: Optional[bool] = True , UpperCamelCase: Optional[bool or str] = True , ):
"""simple docstring"""
super().__init__()
A__ = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
A__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
A__ = nn.Convad(
in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=UpperCamelCase , stride=UpperCamelCase , padding=UpperCamelCase , groups=UpperCamelCase , bias=UpperCamelCase , padding_mode="""zeros""" , )
if use_normalization:
A__ = nn.BatchNormad(
num_features=UpperCamelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCamelCase , track_running_stats=UpperCamelCase , )
else:
A__ = None
if use_activation:
if isinstance(UpperCamelCase , UpperCamelCase ):
A__ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCamelCase ):
A__ = ACTaFN[config.hidden_act]
else:
A__ = config.hidden_act
else:
A__ = None
def UpperCamelCase ( self: List[Any] , UpperCamelCase: torch.Tensor ):
"""simple docstring"""
if self.config.tf_padding:
A__ = apply_tf_padding(UpperCamelCase , self.convolution )
A__ = self.convolution(UpperCamelCase )
if self.normalization is not None:
A__ = self.normalization(UpperCamelCase )
if self.activation is not None:
A__ = self.activation(UpperCamelCase )
return features
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = MobileNetVaConfig
UpperCAmelCase = load_tf_weights_in_mobilenet_va
UpperCAmelCase = "mobilenet_v1"
UpperCAmelCase = "pixel_values"
UpperCAmelCase = False
def UpperCamelCase ( self: Any , UpperCamelCase: Union[nn.Linear, nn.Convad] ):
"""simple docstring"""
if isinstance(UpperCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: bool = True ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config
A__ = 32
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
A__ = MobileNetVaConvLayer(
UpperCamelCase , in_channels=config.num_channels , out_channels=UpperCamelCase , kernel_size=3 , stride=2 , )
A__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
A__ = nn.ModuleList()
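        # 13 depthwise-separable blocks: a 3x3 depthwise conv followed by a 1x1 pointwise conv;
        # the channel depth doubles at the first block and at every stride-2 block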
for i in range(13 ):
A__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=1 , ) )
A__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A__ = self.conv_stem(UpperCamelCase )
A__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
A__ = layer_module(UpperCamelCase )
if output_hidden_states:
A__ = all_hidden_states + (hidden_states,)
A__ = hidden_states
if self.pooler is not None:
A__ = torch.flatten(self.pooler(UpperCamelCase ) , start_dim=1 )
else:
A__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=UpperCamelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = MobileNetVaModel(UpperCamelCase )
A__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
A__ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.mobilenet_va(UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier(self.dropout(UpperCamelCase ) )
A__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ = """single_label_classification"""
else:
A__ = """multi_label_classification"""
if self.config.problem_type == "regression":
A__ = MSELoss()
if self.num_labels == 1:
A__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ = BCEWithLogitsLoss()
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
if not return_dict:
A__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , )
| 335 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "dandelin/vilt-b32-finetuned-vqa"
UpperCAmelCase = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
UpperCAmelCase = "image_qa"
UpperCAmelCase = AutoProcessor
UpperCAmelCase = AutoModelForVisualQuestionAnswering
UpperCAmelCase = ["image", "text"]
UpperCAmelCase = ["text"]
def __init__( self: List[str] , *UpperCamelCase: Dict , **UpperCamelCase: List[str] ):
"""simple docstring"""
requires_backends(self , ["""vision"""] )
super().__init__(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: str , UpperCamelCase: "Image" , UpperCamelCase: str ):
"""simple docstring"""
return self.pre_processor(UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
def UpperCamelCase ( self: str , UpperCamelCase: str ):
"""simple docstring"""
with torch.no_grad():
return self.model(**UpperCamelCase ).logits
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: int ):
"""simple docstring"""
A__ = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 335 | 1 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( UpperCAmelCase_ : int = 4 ):
A__ = abs(UpperCAmelCase_ ) or 4
return [[1 + x + y * row_size for x in range(UpperCAmelCase_ )] for y in range(UpperCAmelCase_ )]
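# e.g. with the default size of 4 this yields [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]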
def _snake_case ( UpperCAmelCase_ : list[list[int]] ):
return reverse_row(transpose(UpperCAmelCase_ ) )
# OR.. transpose(reverse_column(matrix))
def _snake_case ( UpperCAmelCase_ : list[list[int]] ):
return reverse_row(reverse_column(UpperCAmelCase_ ) )
# OR.. reverse_column(reverse_row(matrix))
def _snake_case ( UpperCAmelCase_ : list[list[int]] ):
return reverse_column(transpose(UpperCAmelCase_ ) )
# OR.. transpose(reverse_row(matrix))
def _snake_case ( UpperCAmelCase_ : list[list[int]] ):
A__ = [list(UpperCAmelCase_ ) for x in zip(*UpperCAmelCase_ )]
return matrix
def _snake_case ( UpperCAmelCase_ : list[list[int]] ):
A__ = matrix[::-1]
return matrix
def _snake_case ( UpperCAmelCase_ : list[list[int]] ):
A__ = [x[::-1] for x in matrix]
return matrix
def _snake_case ( UpperCAmelCase_ : list[list[int]] ):
for i in matrix:
print(*UpperCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[Any] = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
SCREAMING_SNAKE_CASE_ : Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
SCREAMING_SNAKE_CASE_ : int = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
| 335 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
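# Note on get_expected_values: it mirrors the shortest-edge resizing rule. For example,
# a 400x600 (w x h) input with shortest_edge=18 resizes to width 18 and height
# int(18 * 600 / 400) = 27: the shorter side is scaled to 18 and the aspect ratio is
# preserved (the longest_edge cap is handled by the image processor itself).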
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 335 | 1 |
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings,
    # increment the count in the corresponding counter
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 335 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
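# For the adjacency list above, the articulation points printed are 2, 3 and 5:
# removing 2 splits off {0, 1}, removing 3 isolates 4, and removing 5 cuts off {6, 7, 8}.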
| 335 | 1 |
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 0 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
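        # The boundary checks above rely on how these activations saturate: SiLU, Mish
        # and GELU tend to 0 for large negative inputs, return exactly 0 at 0, and are
        # close enough to the identity for large positive inputs that act(20) == 20
        # within float32 precision.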
| 335 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
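# Hypothetical quick-start outside the test suite (same public API as exercised above):
#   model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   inputs = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")(images=img, return_tensors="tf")
#   predicted_class = int(tf.math.argmax(model(**inputs).logits, axis=-1)[0])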
| 1 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n    also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
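        # Reading the returned tuple: during training it is (loss, logits, ...); in eval
        # mode the (original_entropy, highway_entropy) pair and the exit layer index are
        # appended, which is what early-exit inference uses to decide where to stop.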
| 335 | 0 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCamelCase : Tuple = get_logger(__name__)
class __lowerCAmelCase :
'''simple docstring'''
def __init__(self : Tuple , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = (
os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowercase__ = Extractor
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
lowercase__ = os.path.abspath(UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) )
def UpperCamelCase__ (self : List[str] , UpperCamelCase : str , UpperCamelCase : bool ):
'''simple docstring'''
return force_extract or (
not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase ))
)
def UpperCamelCase__ (self : Dict , UpperCamelCase : str , UpperCamelCase : bool = False ):
'''simple docstring'''
lowercase__ = self.extractor.infer_extractor_format(UpperCamelCase )
if not extractor_format:
return input_path
lowercase__ = self._get_output_path(UpperCamelCase )
if self._do_extract(UpperCamelCase , UpperCamelCase ):
self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return output_path
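# The manager above extracts into a cache directory keyed by a hash of the original
# path, and only re-extracts when the target is missing, empty, or force_extract is set.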
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
@classmethod
@abstractmethod
def UpperCamelCase__ (cls : Any , UpperCamelCase : Union[Path, str] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
...
@staticmethod
@abstractmethod
def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ):
'''simple docstring'''
...
class __lowerCAmelCase (lowercase_ , lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : List[bytes] = []
@staticmethod
def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : int ):
'''simple docstring'''
with open(UpperCamelCase , '''rb''' ) as f:
return f.read(UpperCamelCase )
@classmethod
def UpperCamelCase__ (cls : int , UpperCamelCase : Union[Path, str] , UpperCamelCase : bytes = b"" ):
'''simple docstring'''
if not magic_number:
lowercase__ = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
lowercase__ = cls.read_magic_number(UpperCamelCase , UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
@classmethod
def UpperCamelCase__ (cls : Optional[int] , UpperCamelCase : Union[Path, str] , **UpperCamelCase : List[Any] ):
'''simple docstring'''
return tarfile.is_tarfile(UpperCamelCase )
@staticmethod
def UpperCamelCase__ (UpperCamelCase : List[str] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
def resolved(UpperCamelCase : str ) -> str:
return os.path.realpath(os.path.abspath(UpperCamelCase ) )
def badpath(UpperCamelCase : str , UpperCamelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase )
def badlink(UpperCamelCase : int , UpperCamelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
lowercase__ = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=UpperCamelCase )
lowercase__ = resolved(UpperCamelCase )
for finfo in members:
if badpath(finfo.name , UpperCamelCase ):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" )
elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
else:
yield finfo
@staticmethod
def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ):
'''simple docstring'''
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
lowercase__ = tarfile.open(UpperCamelCase )
tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) )
tar_file.close()
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = [B"""\x1F\x8B"""]
@staticmethod
def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ):
'''simple docstring'''
with gzip.open(UpperCamelCase , '''rb''' ) as gzip_file:
with open(UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCamelCase__ (cls : Tuple , UpperCamelCase : Union[Path, str] , UpperCamelCase : bytes = b"" ):
'''simple docstring'''
if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase , '''rb''' ) as fp:
lowercase__ = _EndRecData(UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowercase__ = fp.read(UpperCamelCase ) # CD is where we expect it to be
if len(UpperCamelCase ) == sizeCentralDir:
lowercase__ = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ):
'''simple docstring'''
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with zipfile.ZipFile(UpperCamelCase , '''r''' ) as zip_file:
zip_file.extractall(UpperCamelCase )
zip_file.close()
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Dict = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ):
'''simple docstring'''
with lzma.open(UpperCamelCase ) as compressed_file:
with open(UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Dict = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ):
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
lowercase__ = rarfile.RarFile(UpperCamelCase )
rf.extractall(UpperCamelCase )
rf.close()
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ):
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
lowercase__ = zstd.ZstdDecompressor()
with open(UpperCamelCase , '''rb''' ) as ifh, open(UpperCamelCase , '''wb''' ) as ofh:
dctx.copy_stream(UpperCamelCase , UpperCamelCase )
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ):
'''simple docstring'''
with bz2.open(UpperCamelCase , '''rb''' ) as compressed_file:
with open(UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ):
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import py7zr
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with py7zr.SevenZipFile(UpperCamelCase , '''r''' ) as archive:
archive.extractall(UpperCamelCase )
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ):
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lz4.frame
with lz4.frame.open(UpperCamelCase , '''rb''' ) as compressed_file:
with open(UpperCamelCase , '''wb''' ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class __lowerCAmelCase :
'''simple docstring'''
lowerCAmelCase__ : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase__ (cls : Optional[Any] ):
'''simple docstring'''
return max(
len(UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase , UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : int ):
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase )
except OSError:
return b""
@classmethod
def UpperCamelCase__ (cls : Any , UpperCamelCase : Union[Path, str] , UpperCamelCase : bool = False ):
'''simple docstring'''
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=UpperCamelCase , )
lowercase__ = cls.infer_extractor_format(UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase__ (cls : int , UpperCamelCase : Union[Path, str] ): # <Added version="2.4.0"/>
'''simple docstring'''
lowercase__ = cls._get_magic_number_max_length()
lowercase__ = cls._read_magic_number(UpperCamelCase , UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return extractor_format
@classmethod
def UpperCamelCase__ (cls : Optional[Any] , UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[BaseExtractor] = "deprecated" , ):
'''simple docstring'''
os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase )
# Prevent parallel extractions
lowercase__ = str(Path(UpperCamelCase ).with_suffix('''.lock''' ) )
with FileLock(UpperCamelCase ):
shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=UpperCamelCase , )
lowercase__ = extractor if extractor != '''deprecated''' else extractor_format
else:
lowercase__ = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase , UpperCamelCase )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase ):
return extractor.extract(UpperCamelCase , UpperCamelCase )
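# Usage sketch (method names as in the upstream `datasets` library, where this class's
# classmethods are called `infer_extractor_format` and `extract`):
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")
#   Extractor.extract("archive.tar.gz", "out_dir", extractor_format=fmt)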
| 2 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        """" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" """
    ]
    tgt = [
        """ Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
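# Intuition for the newline tests above: rougeLsum is computed per sentence-split line,
# so joining multi-sentence summaries with "\n" (newline_sep=True) changes (and here
# improves) the score, while rouge1/rouge2/rougeL are insensitive to the separator.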
| 335 | 0 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_coords, max_coords):
    return (
        clamp(rect[0], min_coords[0], max_coords[0]),
        clamp(rect[1], min_coords[1], max_coords[1]),
        clamp(rect[2], min_coords[0], max_coords[0]),
        clamp(rect[3], min_coords[1], max_coords[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
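# Example: next_divisible(103, 8) == 96, the largest multiple of 8 that is <= 103.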
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae,
        text_encoder,
        tokenizer,
        unet,
        low_res_scheduler,
        scheduler,
        max_noise_level=350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
@torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,  # assumed name; the obfuscated source only shows a default of 1 here
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
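    # Design note: the image is processed in tile_size x tile_size tiles with a
    # tile_border overlap, and seams are blended through the linear-ramp transparency
    # mask, so peak memory depends on the tile size rather than the full image size.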
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 3 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'MobileNetV1Config'
# Base docstring
SCREAMING_SNAKE_CASE_ : str = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : List[str] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : Tuple = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Pad `features` the way TensorFlow "SAME" padding would for `conv_layer`."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    # TensorFlow puts any odd extra padding on the bottom/right side
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, """constant""", 0.0)
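# Usage sketch (illustrative addition, not part of the upstream file): shows that
# `apply_tf_padding` matches TensorFlow "SAME" padding, which pads the extra
# row/column on the bottom/right only. The shapes below are arbitrary demo values.
def _demo_apply_tf_padding():
    conv = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=2)
    features = torch.zeros(1, 3, 224, 224)
    padded = apply_tf_padding(features, conv)
    # 224 % 2 == 0 -> pad_along = max(3 - 2, 0) = 1, split as top/left 0, bottom/right 1
    assert padded.shape == (1, 3, 225, 225)
    return padded.shape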
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
        if out_channels % groups != 0:
            raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="""zeros""",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9_997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise convolution followed by a pointwise (1x1) convolution
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("""You have to specify pixel_values""")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
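# Usage sketch (illustrative addition, not part of the upstream file): a forward
# pass through the classification model above with a randomly initialized
# network. It assumes the relative imports resolve (i.e. this module lives inside
# the transformers package); the label count and input size are demo values.
def _demo_mobilenet_v1_classification():
    config = MobileNetVaConfig(num_labels=2)
    model = MobileNetVaForImageClassification(config)
    pixel_values = torch.randn(1, config.num_channels, 224, 224)
    outputs = model(pixel_values)
    return outputs.logits.shape  # torch.Size([1, 2])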
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self) -> str:
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)
    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)
    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError('Mixed precision is currently not supported.' )

        has_model_class_in_config = (
            hasattr(config, 'architectures' )
            and isinstance(config.architectures, list)
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers', fromlist=[model_class] )
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, 'vocab_size' ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )

        if self.args.fp16:
            raise NotImplementedError('Mixed precision is currently not supported.' )

        has_model_class_in_config = (
            hasattr(config, 'architectures' )
            and isinstance(config.architectures, list)
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers', fromlist=[model_class] )
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, 'vocab_size' ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 extra times to stabilize compilation for tpu
                    logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=1_0, )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            'Note that TensorFlow allocates more memory than '
            'it might need to speed up computation. '
            'The memory reported here corresponds to the memory '
            'reported by `nvidia-smi`, which can vary depending '
            'on total available memory on the GPU that is used.' )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
                            ' consumption line by line.' )
                    trace = start_memory_tracing('transformers' )

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
                        ' with `args.memory=False`' )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            'py3nvml not installed, we won\'t log GPU memory usage. '
                            'Install py3nvml (pip install py3nvml) to log information about GPU.' )
                        memory = 'N/A'
                    else:
                        logger.info(
                            'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
                            ' running on the same GPU.' )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
                            ' TensorFlow.' )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
                return "N/A", None
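# Usage sketch (illustrative addition, not part of the upstream file): driving
# the benchmark class defined above. The `TensorFlowBenchmarkArguments` fields
# follow the upstream transformers API; the model name and sizes are demo values.
#
#     args = TensorFlowBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32]
#     )
#     benchmark = TensorFlowBenchmark(args)
#     results = benchmark.run()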
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the paths from the top-left to the bottom-right corner of `grid`,
    moving up/down/left/right and stepping only on cells that contain 0."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
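# Usage sketch (illustrative addition): counting paths through a small maze,
# where 0 is an open cell and 1 is a wall. The grid below is a demo value.
def _demo_depth_first_search() -> int:
    grid = [
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    # Two obstacle-free corridors around the central wall -> expect 2 paths.
    return depth_first_search(grid, 0, 0, set())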
if __name__ == "__main__":
import doctest
doctest.testmod()
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files, (str, list, tuple) ):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'''files''': files} ) )
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, '''rb''' ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 3_2, 1_6 << 1_0)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode('''utf-8''' )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid )
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}" )
                                    raise ValueError(f"Not able to read records in the JSON file at {file}." ) from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}" )
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. " ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
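# Usage sketch (illustrative addition): the builder above backs
# `load_dataset("json", ...)`. The file name and `field` value are demo
# assumptions; `field` selects a nested key when the file is one JSON object.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("json", data_files="my_records.json", field="data")
#     print(ds["train"][0])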
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_megatron_bert'] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
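# Note (illustrative addition): with the `_LazyModule` wiring above, submodules
# are only imported on first attribute access, so e.g.
#
#     from transformers.models.megatron_bert import MegatronBertConfig
#
# stays cheap, while accessing `MegatronBertModel` triggers the torch-backed
# modeling import on demand.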
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 1_0
    def get_scheduler_config(self, **kwargs) -> dict:
        config = {
            '''num_train_timesteps''': 1_100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001], [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
scheduler.set_timesteps(self.num_inference_steps )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma
__a = sample.to(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
__a = scheduler.scale_model_input(_snake_case , _snake_case )
__a = model(_snake_case , _snake_case )
__a = scheduler.step(_snake_case , _snake_case , _snake_case )
__a = output.prev_sample
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a = scheduler_class(**_snake_case )
scheduler.set_timesteps(self.num_inference_steps )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma
__a = sample.to(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
__a = scheduler.scale_model_input(_snake_case , _snake_case )
__a = model(_snake_case , _snake_case )
__a = scheduler.step(_snake_case , _snake_case , _snake_case )
__a = output.prev_sample
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=_snake_case )
__a = self.dummy_model()
__a = self.dummy_sample_deter.to(_snake_case ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__a = scheduler.scale_model_input(_snake_case , _snake_case )
__a = model(_snake_case , _snake_case )
__a = scheduler.step(_snake_case , _snake_case , _snake_case )
__a = output.prev_sample
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case , use_karras_sigmas=_snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=_snake_case )
__a = self.dummy_model()
__a = self.dummy_sample_deter.to(_snake_case ) * scheduler.init_noise_sigma
__a = sample.to(_snake_case )
for t in scheduler.timesteps:
__a = scheduler.scale_model_input(_snake_case , _snake_case )
__a = model(_snake_case , _snake_case )
__a = scheduler.step(_snake_case , _snake_case , _snake_case )
__a = output.prev_sample
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
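# Usage sketch (illustrative addition): swapping the scheduler under test into a
# diffusers pipeline via the public `from_config` API; the checkpoint id is a
# demo assumption.
#
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
#     image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]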
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
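# Worked example (illustrative addition): how `_distribute_shards` splits 10
# shards across at most 3 jobs -- the first job takes the remainder, matching the
# parametrized expectation above.
def _demo_distribute_shards():
    out = _distribute_shards(num_shards=10, max_num_jobs=3)
    assert out == [range(0, 4), range(4, 7), range(7, 10)]
    return out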
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}
# fmt: off
lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    """Construct a MBart-50 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : List[Any],lowercase_ : Optional[Any],lowercase_ : List[Any]=None,lowercase_ : List[str]=None,lowercase_ : List[Any]="</s>",lowercase_ : List[Any]="</s>",lowercase_ : Dict="<s>",lowercase_ : Union[str, Any]="<unk>",lowercase_ : Dict="<pad>",lowercase_ : str="<mask>",lowercase_ : Optional[Dict[str, Any]] = None,**lowercase_ : List[Any],)-> None:
'''simple docstring'''
A__ = AddedToken(lowercase_,lstrip=lowercase_,rstrip=lowercase_ ) if isinstance(lowercase_,lowercase_ ) else mask_token
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
A__ = kwargs.get('additional_special_tokens',[] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowercase_,tgt_lang=lowercase_,eos_token=lowercase_,unk_token=lowercase_,sep_token=lowercase_,cls_token=lowercase_,pad_token=lowercase_,mask_token=lowercase_,sp_model_kwargs=self.sp_model_kwargs,**lowercase_,)
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase_ ) )
A__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
A__ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A__ = 1
A__ = len(self.sp_model )
A__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase_ )
}
A__ = {v: k for k, v in self.lang_code_to_id.items()}
A__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
A__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
A__ = src_lang if src_lang is not None else 'en_XX'
A__ = self.lang_code_to_id[self._src_lang]
A__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : Any )-> Dict:
'''simple docstring'''
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__( self : Tuple,lowercase_ : Dict )-> None:
'''simple docstring'''
A__ = d
# for backward compatibility
if not hasattr(self,'sp_model_kwargs' ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self : List[str] )-> Dict:
'''simple docstring'''
A__ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case__ ( self : int,lowercase_ : str )-> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowercase_,out_type=lowercase_ )
def snake_case__ ( self : Optional[Any],lowercase_ : str )-> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A__ = self.sp_model.PieceToId(lowercase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case__ ( self : Optional[int],lowercase_ : int )-> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def snake_case__ ( self : Any,lowercase_ : str,lowercase_ : Optional[str] = None )-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowercase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ = os.path.join(
lowercase_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_,'wb' ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
def snake_case__ ( self : int,lowercase_ : List[int],lowercase_ : Optional[List[int]] = None,lowercase_ : bool = False )-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_,token_ids_a=lowercase_,already_has_special_tokens=lowercase_ )
A__ = [1] * len(self.prefix_tokens )
A__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase_ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase_ )) + ([0] * len(lowercase_ )) + suffix_ones
def snake_case__ ( self : Union[str, Any],lowercase_ : List[int],lowercase_ : Optional[List[int]] = None )-> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case__ ( self : Optional[int],lowercase_ : Dict,lowercase_ : str,lowercase_ : Optional[str],lowercase_ : Optional[str],**lowercase_ : Any )-> Union[str, Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
A__ = src_lang
A__ = self(lowercase_,add_special_tokens=lowercase_,return_tensors=lowercase_,**lowercase_ )
A__ = self.convert_tokens_to_ids(lowercase_ )
A__ = tgt_lang_id
return inputs
def snake_case__ ( self : List[Any],lowercase_ : List[str],lowercase_ : str = "en_XX",lowercase_ : Optional[List[str]] = None,lowercase_ : str = "ro_RO",**lowercase_ : Optional[int],)-> BatchEncoding:
'''simple docstring'''
A__ = src_lang
A__ = tgt_lang
return super().prepare_seqaseq_batch(lowercase_,lowercase_,**lowercase_ )
def snake_case__ ( self : Any )-> List[str]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case__ ( self : int )-> Tuple:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case__ ( self : List[str],lowercase_ : str )-> None:
'''simple docstring'''
A__ = self.lang_code_to_id[src_lang]
A__ = [self.cur_lang_code_id]
A__ = [self.eos_token_id]
def snake_case__ ( self : Union[str, Any],lowercase_ : str )-> None:
'''simple docstring'''
A__ = self.lang_code_to_id[tgt_lang]
A__ = [self.cur_lang_code_id]
A__ = [self.eos_token_id]
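# Usage sketch (illustrative addition): preparing translation inputs with the
# tokenizer above, using the checkpoint documented in PRETRAINED_VOCAB_FILES_MAP.
# Running it requires network access and the sentencepiece model file.
#
#     tokenizer = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     model_inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria")
#     # input_ids = [src_lang_code, ...tokens..., eos] per set_src_lang_special_tokens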
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or a `Version` object) against a requirement using `operation` (e.g. '>=')."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Check the installed torch version against `version` using `operation`."""
    return compare_versions(torch_version, operation, version)
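# Usage sketch (illustrative addition): gating a code path on the installed
# torch version. Assumes '>=' is among the operations in STR_OPERATION_TO_FUNC.
def _demo_version_gate() -> bool:
    # True when the installed torch is at least 1.12, False otherwise.
    return is_torch_version(">=", "1.12")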
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key

                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = '''gelu'''
    config.feat_extract_norm = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = '''Wav2Vec2FeatureExtractor'''
    config.tokenizer_class = '''Wav2Vec2CTCTokenizer'''

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak model's weights to transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path, '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path) )
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, '''w''', encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=False, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
) | 8 |
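# Usage sketch (a hedged illustration; the script name, paths and flags below
# are made up, not taken from the file above):
#
#   python convert_sew_checkpoint.py --checkpoint_path sew.pt \
#       --pytorch_dump_folder_path ./sew-hf --dict_path ./dict.ltr.txt --is_finetuned
#
# The dumped folder can then be reloaded with the regular transformers API,
# e.g. SEWForCTC.from_pretrained("./sew-hf") and
# Wav2Vec2Processor.from_pretrained("./sew-hf").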
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE_ : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
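# A minimal, self-contained sketch (my own illustration, not part of the metric
# above) of the reference transposition done in _compute: sacrebleu expects one
# list per reference *stream*, while this metric takes one list of references
# per *prediction*, so the nested list is transposed first.
references = [["ref A1", "ref B1"], ["ref A2", "ref B2"]]  # one sub-list per prediction
references_per_prediction = len(references[0])
transformed = [[refs[i] for refs in references] for i in range(references_per_prediction)]
assert transformed == [["ref A1", "ref A2"], ["ref B1", "ref B2"]]  # one sub-list per stream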
| 335 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"))
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"))
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"))
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"))
            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"):
            _ = AutoImageProcessor.from_pretrained("clip-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json."):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False)
        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True)
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"))
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CustomImageProcessor):
            is_local = True
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False)
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True)
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
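# Sketch of the registration pattern exercised by the last two tests (the
# Custom* classes live in the test-only test_module package, so this is
# illustrative rather than runnable outside the test suite):
# AutoConfig.register("custom", CustomConfig)
# AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
# image_processor = AutoImageProcessor.from_pretrained("path/to/custom/checkpoint")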
| 9 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset: Optional[Dataset] = None, eval_examples=None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", **gen_kwargs):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size)))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size)))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
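# A self-contained sketch of what speed_metrics contributes above (my own
# example; the sleep stands in for an evaluation loop):
import time
from transformers.trainer_utils import speed_metrics
start = time.time()
time.sleep(0.1)
print(speed_metrics("eval", start, num_samples=8, num_steps=2))
# e.g. {'eval_runtime': 0.1, 'eval_samples_per_second': 79.9, 'eval_steps_per_second': 20.0}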
| 335 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1)
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073], image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711], resample=3, size=224)
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1_024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0)
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference)
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy")
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np").images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 10 |
"""simple docstring"""
class Graph:
    def __init__(self):
        self.vertex = {}
    def print_graph(self):
        # print the adjacency-list representation of the graph
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))
    def add_edge(self, from_vertex: int, to_vertex: int):
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs(self):
        # visited list indexed by vertex number (vertices are assumed to be 0..n-1)
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)
    def dfs_recursive(self, start_vertex: int, visited: list):
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this vertex
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
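# The same traversal with an explicit stack instead of recursion (a
# self-contained sketch of mine, independent of the Graph class above):
def dfs_iterative(adjacency: dict, start: int) -> list:
    visited, order, stack = set(), [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbors in reverse so they are visited in insertion order
        stack.extend(reversed(adjacency.get(node, [])))
    return order
assert dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]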
| 335 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
lowerCAmelCase__ = datasets.logging.get_logger(__name__)
lowerCAmelCase__ = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
lowerCAmelCase__ = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
lowerCAmelCase__ = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
lowerCAmelCase__ = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCAmelCase__ ( datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').")
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}")
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
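# Usage sketch (a checkpoint is downloaded on the first call; the config names
# follow the CHECKPOINT_URLS mapping above):
# bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")
# results = bleurt.compute(predictions=["hello there"], references=["hello there"])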
| 11 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError("""Invalid input""")
    modulus = 10**n
    number = 2_8433 * (pow(2, 783_0457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
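# Why the modulus works: pow(base, exp, mod) performs modular exponentiation,
# so only the last n digits are ever materialized. A quick sanity check of my
# own on small numbers:
assert pow(2, 10, 10**3) == 2**10 % 1_000 == 24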
| 335 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'Pix2StructImageProcessor'
    tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast')
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 20_48, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("""You have to specify either images or text.""")
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("""attention_mask""")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("""input_ids""")
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
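# Usage sketch (the checkpoint name below is a real Hub id, but it is my
# illustration rather than something taken from this file):
# from transformers import Pix2StructProcessor
# from PIL import Image
# processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
# inputs = processor(images=Image.open("screenshot.png"), return_tensors="pt")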
| 12 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad ):
A__ = end_pointa[0] - end_pointa[0]
A__ = end_pointa[1] - end_pointa[1]
A__ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : Vectorad ):
A__ = ab[1] * ac[2] - ab[2] * ac[1] # *i
A__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
A__ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : int ):
return tuple(round(UpperCAmelCase_ , UpperCAmelCase_ ) for x in vector ) == (0, 0, 0)
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : int = 10 ):
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
| 335 | 0 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 8_00, image_height: int = 6_00, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
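# Equivalent escape-time loop written with Python complex numbers (a
# self-contained readability sketch of mine, not used by the functions above):
def escape_steps(c: complex, max_step: int = 50) -> int:
    z = 0j
    for step in range(max_step):
        z = z * z + c
        if abs(z) > 2:  # |z| > 2 guarantees divergence
            break
    return step
assert escape_steps(0j) == 49      # stays bounded for the full budget
assert escape_steps(1 + 1j) < 5    # diverges quickly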
| 13 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
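# The sys.modules swap above replaces this module object with a _LazyModule, so
# the heavy torch-backed submodules are only imported when one of the exported
# names (e.g. PegasusXModel) is first accessed.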
| 335 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
_lowerCamelCase : Any = None
_lowerCamelCase : str = {
"""7B""": 11008,
"""13B""": 13824,
"""30B""": 17920,
"""65B""": 22016,
"""70B""": 28672,
}
_lowerCamelCase : int = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256) -> int:
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path) -> dict:
    with open(path, '''r''') as f:
        return json.load(f)
def write_json(text, path) -> None:
    with open(path, '''w''') as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, '''tmp''')
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, '''params.json'''))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['''n_layers''']
    n_heads = params['''n_heads''']
    n_heads_per_shard = n_heads // num_shards
    dim = params['''dim''']
    dims_per_head = dim // n_heads
    base = 1_00_00.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params['''n_kv_heads''']  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
    print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, '''consolidated.00.pth'''), map_location='''cpu''')
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"""consolidated.{i:02d}.pth""" ), map_location='''cpu''')
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {'''weight_map''': {}}
    for layer_i in range(n_layers):
        filename = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
        if model_size == "7B":
            # Unsharded
            state_dict = {
f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wq.weight"""] ),
f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wk.weight"""] ),
f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""],
f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""],
f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""],
f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""],
f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""],
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""],
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""],
}
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
                    f"""layers.{layer_i}.attention_norm.weight"""
                ].clone(),
                f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
                    f"""layers.{layer_i}.ffn_norm.weight"""
                ].clone(),
            }
            state_dict[f"""model.layers.{layer_i}.self_attn.q_proj.weight"""] = permute(
                torch.cat(
                    [
                        loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(dim, dim) )
            state_dict[f"""model.layers.{layer_i}.self_attn.k_proj.weight"""] = permute(
                torch.cat(
                    [
                        loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view(
                            num_local_key_value_heads, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, )
            state_dict[f"""model.layers.{layer_i}.self_attn.v_proj.weight"""] = torch.cat(
                [
                    loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view(
                        num_local_key_value_heads, dims_per_head, dim)
                    for i in range(num_shards)
                ], dim=0, ).reshape(key_value_dim, dim)
            state_dict[f"""model.layers.{layer_i}.self_attn.o_proj.weight"""] = torch.cat(
                [loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(num_shards)], dim=1)
            state_dict[f"""model.layers.{layer_i}.mlp.gate_proj.weight"""] = torch.cat(
                [loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(num_shards)], dim=0)
            state_dict[f"""model.layers.{layer_i}.mlp.down_proj.weight"""] = torch.cat(
                [loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(num_shards)], dim=1)
            state_dict[f"""model.layers.{layer_i}.mlp.up_proj.weight"""] = torch.cat(
                [loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(num_shards)], dim=0)
        state_dict[f"""model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"""] = inv_freq
        for k, v in state_dict.items():
            index_dict['''weight_map'''][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
    if model_size == "7B":
        # Unsharded
        state_dict = {
            '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
            '''model.norm.weight''': loaded['''norm.weight'''],
            '''lm_head.weight''': loaded['''output.weight'''],
        }
    else:
        state_dict = {
            '''model.norm.weight''': loaded[0]['''norm.weight'''],
            '''model.embed_tokens.weight''': torch.cat(
                [loaded[i]['''tok_embeddings.weight'''] for i in range(num_shards)], dim=1),
            '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict['''weight_map'''][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
    # Write configs
    index_dict['''metadata'''] = {'''total_size''': param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, '''pytorch_model.bin.index.json'''))
    ffn_dim_multiplier = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
    multiple_of = params['''multiple_of'''] if '''multiple_of''' in params else 256
    config = LlamaConfig(
        hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params['''n_heads'''], num_hidden_layers=params['''n_layers'''], rms_norm_eps=params['''norm_eps'''], num_key_value_heads=num_key_value_heads, )
    config.save_pretrained(tmp_model_path)
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print('''Loading the checkpoint in a Llama model.''')
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print('''Saving in the Transformers format.''')
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path) -> None:
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--input_dir''', help='''Location of LLaMA weights, which contains tokenizer.model and model folders''', )
    parser.add_argument(
        '''--model_size''', choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''], )
    parser.add_argument(
        '''--output_dir''', help='''Location to write HF model and tokenizer''', )
    parser.add_argument('''--safe_serialization''', type=bool, help='''Whether or not to save using `safetensors`.''')
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, '''tokenizer.model''')
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
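# Invocation sketch (the paths are illustrative):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path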
| 14 |
"""simple docstring"""
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0
    def add_edge(self, u, v, w):
        self.dp[u][v] = w
    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])
    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
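# Quick check of the relaxation above on a 3-node triangle (a self-contained
# sketch of mine, independent of the Graph class):
INF = float("inf")
dp = [[0, 5, INF], [INF, 0, 1], [INF, INF, 0]]
for k in range(3):
    for i in range(3):
        for j in range(3):
            dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j])
assert dp[0][2] == 6  # shortest path 0 -> 1 -> 2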
| 335 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job) -> dict:
    """Extract time info from a single job in a GitHub Actions workflow run"""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None) -> dict:
    """Extract time info for all jobs in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 1_0_0) / 1_0_0)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'''&page={i + 2}''', headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v["duration"]}''')
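# Illustrative run (the script name and run id are placeholders); pass a
# GitHub token to get_job_time for private repositories:
#   python get_github_job_time.py --workflow_run_id 1234567890
# Prints each job name with its duration in minutes, longest first.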
| 15 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = (3, 32, 1_28)
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
A__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
A__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
A__ = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
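# The suite follows the standard unittest protocol; an illustrative run
# (the test-file path is assumed, not taken from this snippet):
#   python -m pytest tests/models/mgp_str/test_processor_mgp_str.py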
| 335 | 0 |
"""simple docstring"""
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"""{func.__name__}_{param_based_name}"""


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False, )

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True, )

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(
        self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, fp16: bool = True, quality_checks: bool = True, ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(
        self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True, ):
        output_dir = self.get_auto_remove_tmp_dir('''./xxx''', after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(['''--fp16'''])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
        script = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
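
# For illustration, with distributed=True on a machine with at least 2 GPUs
# the launcher prefix above resolves to:
#   deepspeed --num_nodes 1 --num_gpus 2
# and the final command appends run_asr.py plus the --deepspeed config args.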
| 16 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
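
# Quick numeric checks, following I = I_0 * cos^2(theta):
#   malus_law(100.0, 60)  ->  25.0   (cos 60° = 0.5, squared = 0.25)
#   malus_law(100.0, 0)   -> 100.0   (polarizer and analyser aligned)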
| 335 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(config, state_dict):
    '''simple docstring'''
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    '''simple docstring'''
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(config, state_dict)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]])
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1E-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
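# Illustrative invocation (the checkpoint filename is a placeholder):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti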
| 17 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
        def extract(*args , **kwargs ):
class a :
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = torch.ones([0] )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(UpperCamelCase )
return self
return Out()
return extract
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
A__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , )
A__ = output.images
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
# put models in fp16
A__ = unet.half()
A__ = vae.half()
A__ = bert.half()
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = init_image.resize((7_60, 5_04) )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
A__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A__ = init_image.resize((7_68, 5_12) )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
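# For reference, the pipeline exercised above can be used standalone
# (a sketch mirroring the slow test's arguments):
#   pipe = AltDiffusionImgaImgPipeline.from_pretrained("BAAI/AltDiffusion")
#   out = pipe(prompt="A fantasy landscape, trending on artstation",
#              image=init_image, strength=0.75, guidance_scale=7.5)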
| 335 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
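
# The behavior under test in one line (illustrative):
#   Dataset.from_list([{"col_1": 3, "col_2": "a"}])  # schema is inferred from the first record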
| 18 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    locka = FileLock(str(tmpdir / """foo.lock"""))
    lockb = FileLock(str(tmpdir / """foo.lock"""))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
            assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = """a""" * 1000 + """.lock"""
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith(""".lock""")
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
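
# The locking pattern these tests exercise, as a standalone sketch:
#   lock = FileLock("resource.lock")
#   with lock.acquire(timeout=0.01):  # a second process holding the same
#       ...                           # path makes acquire raise Timeout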
| 335 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data):
    # Split the loaded dataset into its feature matrix and target labels
    return (data["data"], data["target"])


def xgboost(features, target):
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main():
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
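
# Running the module directly trains on the bundled Iris data and shows the
# confusion-matrix plot; no external download is needed (script name assumed):
#   python xgboost_classifier.py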
| 19 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
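
# Sketch of direct use (the image path is a placeholder; the default
# checkpoint is loaded lazily on first call):
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(Image.open("photo.jpg"), "What is shown in the image?")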
| 335 | 0 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["""rouge2""", """rougeL"""])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["""rouge2"""])
    assert (
        pd.DataFrame(no_aggregation["""rouge2"""]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["""rouge2"""]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = """rougeLsum"""
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_ls():
    k = ["""rouge1""", """rouge2""", """rougeL"""]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
    tgt = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
    tgt = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["""rougeLsum"""], newline_sep=False)["""rougeLsum"""]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["""rougeLsum"""])["""rougeLsum"""]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("""examples/seq2seq/test_data/wmt_en_ro""")
    metrics = calculate_rouge_path(data_dir.joinpath("""test.source"""), data_dir.joinpath("""test.target"""))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("""test.source"""), data_dir.joinpath("""test.target"""), bootstrap_aggregation=False)
    assert isinstance(metrics_default_dict, defaultdict)
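
# Illustrative call mirroring the tests above (argument names are generic):
#   scores = calculate_rouge(pred_summaries, target_summaries, rouge_keys=["rouge1", "rouge2", "rougeL"])
# With bootstrap_aggregation=False the helper keeps per-example scores, which
# is what the pd.DataFrame comparison in the first test relies on.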
| 20 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class a ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=4_00, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 2_55, do_pad=True, ):
        """simple docstring"""
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self, image_inputs, batched=False ):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w)
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h)
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = YolosImageProcessingTester(self )
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
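# For reference, outside the tests the processor is used as (checkpoint id
# taken from the slow test above):
#   image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#   encoding = image_processing(images=image, return_tensors="pt")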
| 335 | 0 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag to distinguish arrow keys from plain key codes

KEYMAP = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
B"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int']))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP['esc'])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
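
# Illustrative read loop built on the helpers above (requires an interactive TTY):
#   while True:
#       key = get_character()
#       if key == chr(KEYMAP["interrupt"]):  # Ctrl-C
#           break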
| 21 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
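
# For the adjacency list above the printed articulation points are 2, 3 and 5:
# removing 2 splits off {0, 1}, removing 3 isolates 4, and removing 5
# disconnects {6, 7, 8} from the rest of the graph.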
| 335 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
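# Migration note (added): instantiating DeiTFeatureExtractor still works but
# emits a FutureWarning; DeiTImageProcessor is a drop-in replacement since the
# deprecated class is an empty subclass of it.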
| 22 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
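# Hedged usage sketch (added, not part of the original tests): get_activation
# resolves a string name to an nn.Module instance, so a config-driven block can do:
#
#     act_fn = get_activation("gelu")
#     out = act_fn(torch.randn(2, 8))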
| 335 | 0 |
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
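# Added examples: get_set_bits_count(25) == 3 since 25 == 0b11001, and
# get_set_bits_count(0) == 0.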
| 23 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 335 | 0 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
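    # Expected output (added note): "1 2 3 4 5", then "After swapping",
    # then "4 2 3 1 5" -- swap_nodes exchanges the payloads, not the links.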
| 24 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        """" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" """
    ]
    tgt = [
        """ Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 335 | 0 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit):
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
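# Added note: with the default ceiling of 1_000_000 this prints 997651, the
# longest sum of consecutive primes below one million that is itself prime
# (Project Euler problem 50).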
| 25 |
"""simple docstring"""
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer's input."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
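# Hedged usage sketch (added; standard transformers API, not part of this file),
# given a PIL image `image`:
#
#     from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=image, return_tensors="pt")
#     predicted_class = model(**inputs).logits.argmax(-1).item()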
| 335 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm to generate a Minimum Spanning Tree (MST) of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
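# Hedged usage sketch (added, not part of the original module):
#
#     g = GraphUndirectedWeighted[int]()
#     g.add_edge(1, 2, 1)
#     g.add_edge(2, 3, 2)
#     g.add_edge(1, 3, 3)
#     mst = g.kruskal()
#     # mst.connections keeps only the two cheapest edges: (1, 2) and (2, 3)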
| 26 |
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
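# Added example: an open 2x2 grid has exactly two simple paths from the
# top-left to the bottom-right corner:
#
#     depth_first_search([[0, 0], [0, 0]], 0, 0, set())  # -> 2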
| 335 | 0 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable):
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
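# Hedged usage sketch (added; the decorator name is reconstructed from context):
#
#     @experimental
#     def new_api():
#         ...
#
#     new_api()  # warns: "'new_api' is experimental and might be subject to ..."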
| 27 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
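# Added note: this is the standard lazy-import pattern -- the TYPE_CHECKING
# branch gives static analyzers real imports, while at runtime the module is
# swapped for a _LazyModule that only imports submodules on attribute access.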
| 335 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"

_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # NOTE (reconstruction assumption): the scrambled source reads "torch.expa",
            # which de-scrambles to torch.exp2 under this document's digit mangling.
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 28 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 335 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 29 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
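# Added example: gate behavior on the installed torch version, e.g.
#
#     if is_torch_version(">=", "1.12.0"):
#         ...  # safe to use the newer API here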
| 335 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.92_15_18, -10.58_98_14, -6.4_67_13_07],
                    [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
                    [-7.78_12_47, -13.95_15_57, -3.74_05_92],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
                    [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
                    [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 30 |
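For reference, a minimal sketch of running the same public ESM-2 checkpoint the integration tests above hit; it assumes network access to download `facebook/esm2_t6_8M_UR50D`, and the protein sequence is just an arbitrary example:

from transformers import AutoTokenizer, TFEsmForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

inputs = tokenizer("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ", return_tensors="tf")
logits = model(**inputs).logits
print(logits.shape)  # (1, sequence_length, 33) -- 33 is the ESM-2 vocab size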
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n    predictions (list of str): The predicted sentences.\n    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n    char_order (int): Character n-gram order. Defaults to `6`.\n    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n    eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n    to reference chrF++.py, NLTK and Moses implementations. If `False`,\n    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n    \'score\' (float): The chrF (chrF++) score,\n    \'char_order\' (int): The character n-gram order,\n    \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n    \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n    Example 1--a simple example of calculating chrF:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction, references=reference)\n        >>> print(results)\n        {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction,\n        ...                         references=reference,\n        ...                         word_order=2)\n        >>> print(results)\n        {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction,\n        ...                         references=reference,\n        ...                         word_order=2,\n        ...                         lowercase=True)\n        >>> print(results)\n        {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 335 | 0 |
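The key difference from sacrebleu's own input format is the reference transposition the metric performs internally. A hedged sketch of the same step using sacrebleu directly (sentences below are made up):

from sacrebleu import CHRF

predictions = ["The dog bit the man.", "It was not unexpected."]
references = [["The dog bit the man."], ["No one was surprised."]]  # one sub-list per prediction

# sacrebleu expects one list per reference *stream*, so the per-prediction
# references must be transposed before scoring.
transposed = [[refs[i] for refs in references] for i in range(len(references[0]))]
score = CHRF(word_order=2).corpus_score(predictions, transposed)
print(score.score)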
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    # The TF tokenizers are usually going to be used as pretrained tokenizers from
    # existing model checkpoints, so that's what we focus on here.

    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_export_for_inference(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 31 |
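The point of an in-graph tokenizer is that tokenization and model can ship as one SavedModel. A minimal sketch, assuming network access for the `bert-base-uncased` weights; the wiring follows the documented TFBertTokenizer pattern:

import tensorflow as tf
from transformers import TFAutoModel, TFBertTokenizer

tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
bert = TFAutoModel.from_pretrained("bert-base-uncased")

text_input = tf.keras.Input(shape=(), dtype=tf.string)
tokenized = tokenizer(text_input)          # tokenization happens inside the graph
pooled = bert(tokenized).pooler_output
model = tf.keras.Model(text_input, pooled)

print(model(tf.constant(["No separate preprocessing step needed."])).shape)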
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer(SeqaSeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 335 | 0 |
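The fallback logic for generation kwargs in evaluate()/predict() above is worth seeing in isolation. A self-contained sketch of that resolution step (the default values here are illustrative stand-ins for the trainer's args):

def resolve_gen_kwargs(gen_kwargs, default_max_length=128, default_num_beams=4):
    # mirrors the "explicit kwarg wins, otherwise fall back to the configured default" rule
    gen_kwargs = dict(gen_kwargs)
    gen_kwargs["max_length"] = (
        gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else default_max_length
    )
    gen_kwargs["num_beams"] = (
        gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else default_num_beams
    )
    return gen_kwargs


print(resolve_gen_kwargs({"num_beams": 8}))  # {'num_beams': 8, 'max_length': 128}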
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]

            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated

            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1_000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 32 |
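The "sortish" sampler exercised above shuffles globally but sorts by length within large chunks, so batches have similar lengths (less padding) while epochs still differ. A toy, self-contained version of the idea (function name and chunk factor are illustrative):

import random


def sortish_indices(lengths, batch_size, chunk_factor=50):
    idx = list(range(len(lengths)))
    random.shuffle(idx)
    chunk = batch_size * chunk_factor
    chunks = [idx[i : i + chunk] for i in range(0, len(idx), chunk)]
    # sort each chunk longest-first so consecutive batches pad to similar lengths
    return [i for c in chunks for i in sorted(c, key=lambda j: lengths[j], reverse=True)]


print(sortish_indices([3, 17, 5, 9, 2, 11], batch_size=2))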
"""simple docstring"""
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int):
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already-visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list):
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 335 | 0 |
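An iterative, stack-based variant avoids recursion-depth limits on large graphs. A sketch operating on the same dict-of-adjacency-lists shape that Graph.vertex uses:

def dfs_iterative(graph: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # reversed() keeps the visit order close to the recursive version
        stack.extend(reversed(graph.get(node, [])))
    return order


print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]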
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 33 |
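Besides the CLI, the converter can be driven directly from Python. A hedged sketch with placeholder paths (the checkpoint file and output directory are hypothetical, not shipped with the script):

from pathlib import Path

checkpoint = "/path/to/wav2vec2_conformer_rel_pos.pt"   # placeholder
dump_dir = "/path/to/hf_dump"                            # placeholder
Path(dump_dir).mkdir(parents=True, exist_ok=True)

convert_wavaveca_conformer_checkpoint(
    checkpoint_path=checkpoint,
    pytorch_dump_folder_path=dump_dir,
    config_path=None,    # fall back to the default conformer config
    dict_path=None,
    is_finetuned=False,  # pretraining checkpoint, no CTC head
)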
"""simple docstring"""
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
| 335 | 0 |
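Three-argument pow() keeps every intermediate value below the modulus, which is the whole trick. The exponent here is small enough that a brute-force cross-check is still feasible:

# Sanity check of the modular shortcut against the exact big-integer computation.
modulus = 10**10
naive = (28433 * 2**7830457 + 1) % modulus      # exact, builds a ~7.8M-bit integer
fast = (28433 * pow(2, 7830457, modulus) + 1) % modulus
assert naive == fast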
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 34 |
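Outside the test harness, the scheduler is typically swapped into a pipeline via from_config(). A minimal sketch, assuming network access for the Stable Diffusion checkpoint named below:

from diffusers import DEISMultistepScheduler, DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=20).images[0]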
"""simple docstring"""
Point3d = tuple[float, float, float]
Vector3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
| 335 | 0 |
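Worked example: the cross product of AB and AC vanishes exactly when the three points lie on one line, so points on x = y = z test collinear while a perturbed third point does not:

print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True  -- cross product is (0, 0, 0)
print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3)))  # False -- cross product is (1, -1, 0)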
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/ \| |-  |_  |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 35 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
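
# A minimal sketch (illustrative assumption, not the real transformers implementation)
# of what the `_LazyModule` replacement above does: it stands in for the package in
# `sys.modules` and imports a submodule only when one of its exported names is first
# accessed. All names below are hypothetical.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = self._attr_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)  # imported lazily, on first access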
| 335 | 0 |
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    '''Count the ways to write needed_sum as a sum of unique natural numbers raised to power.'''
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    '''Validate the inputs, then return the number of solutions.'''
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
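
    # Worked example (added for illustration, not in the original script): 13 has
    # exactly one decomposition into unique natural-number squares, 2**2 + 3**2.
    assert solve(13, 2) == 1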
| 36 |
"""simple docstring"""
import math
class a :
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: List[str]=0 ): # a graph with Node 0,1,...,N-1
"""simple docstring"""
A__ = n
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # adjacency matrix for weight
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = w
def UpperCamelCase ( self: int ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A__ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase ( self: int , UpperCamelCase: List[str] , UpperCamelCase: Dict ):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
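    # Worked check (added for illustration, not in the original file): the cheapest
    # route from 1 to 4 is 1 -> 3 -> 4 (5 + 6 = 11), and from 0 to 3 it is
    # 0 -> 2 -> 3 (9 + 7 = 16); floyd_warshall() relaxes all pairs in O(n^3) time.
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16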
| 335 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="""utf-8""" ,check=__UpperCAmelCase ,)
assert hasattr(self ,"""env""" )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : Optional[int] = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
lowerCAmelCase__ : Any = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=__UpperCAmelCase ,instance_count=__UpperCAmelCase ,instance_type=self.instance_type ,debugger_hook_config=__UpperCAmelCase ,hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=__UpperCAmelCase ,py_version="""py36""" ,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
TrainingJobAnalytics(__UpperCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any:
# create estimator
lowerCAmelCase__ : List[Any] = self.create_estimator(__UpperCAmelCase )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase__ : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase__ : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase__ : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase__ : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,"""w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,__UpperCAmelCase )
| 37 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = (3, 32, 1_28)
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
A__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
A__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
A__ = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 335 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
    '''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
    '''kssteven/ibert-roberta-large-mnli''': (
        '''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 38 |
"""simple docstring"""
import math
def _snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ):
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(UpperCAmelCase_ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
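
    # Worked example (added for clarity): at 60 degrees, cos(60) = 0.5, so a beam
    # of intensity 100 is attenuated to 100 * 0.5**2 = 25.0.
    assert math.isclose(malus_law(100, 60), 25.0)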
| 335 | 0 |
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=30522, type=int)
    args = parser.parse_args()

    logger.info(F'''Loading data from {args.data_file}''')
    with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)

    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(F'''Dump to {args.token_counts_dump}''')
    with open(args.token_counts_dump, '''wb''') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
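
# Example invocation (illustrative; the script name is an assumption and the paths
# are just the argparse defaults above):
#   python token_counts.py \
#       --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle \
#       --vocab_size 30522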
| 39 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImgaImgPipelineFastTests( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
def extract(*UpperCamelCase: List[str] , **UpperCamelCase: Any ):
        class Out:
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = torch.ones([0] )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(UpperCamelCase )
return self
return Out()
return extract
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
A__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , )
A__ = output.images
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
# put models in fp16
A__ = unet.half()
A__ = vae.half()
A__ = bert.half()
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = init_image.resize((7_60, 5_04) )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
A__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A__ = init_image.resize((7_68, 5_12) )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 335 | 0 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
    """facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}


class EncodecConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''')

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
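
# Worked numbers (added for illustration, using the defaults above): the hop length
# is prod([8, 5, 4, 2]) = 320 samples, so at 24 kHz the frame rate is
# ceil(24000 / 320) = 75 frames/s, and the top target bandwidth of 24.0 kbps yields
# int(1000 * 24.0 // (75 * 10)) = 32 residual quantizer codebooks.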
| 40 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
A__ = time.time()
locka.acquire(UpperCAmelCase_ )
assert time.time() - _start > timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = """a""" * 1000 + """.lock"""
A__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
locka.acquire(0 )
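
# Minimal usage sketch (illustrative, not one of the tests above): FileLock
# serializes access to a shared resource across processes; acquire() blocks until
# the lock file is free, or raises Timeout once the given timeout elapses.
def _usage_sketch(tmpdir):
    lock = FileLock(str(tmpdir / "resource.lock"))
    with lock.acquire(timeout=5):
        pass  # critical section: the .lock file on disk is held until this block exits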
| 335 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["""pixel_values"""]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("""size_divisor is required for resizing""")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("""Invalid image(s)""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
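
# Worked example (added for illustration): resize() floors each spatial dimension to
# the nearest multiple of size_divisor, so with size_divisor=32 an input of height
# 480 and width 513 becomes (480 // 32 * 32, 513 // 32 * 32) = (480, 512), making
# both sides compatible with the model's strided downsampling.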
| 41 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""vision"""])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="""pt""")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 335 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int]=7 , UpperCamelCase: str=3 , UpperCamelCase: int=30 , UpperCamelCase: int=4_00 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=None , UpperCamelCase: Any=True , UpperCamelCase: int=[0.5, 0.5, 0.5] , UpperCamelCase: Any=[0.5, 0.5, 0.5] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[Any]=1 / 2_55 , UpperCamelCase: Tuple=True , ):
"""simple docstring"""
A__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self: Any , UpperCamelCase: List[str] , UpperCamelCase: int=False ):
"""simple docstring"""
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size["""shortest_edge"""] * h / w )
A__ = self.size["""shortest_edge"""]
elif w > h:
A__ = self.size["""shortest_edge"""]
A__ = int(self.size["""shortest_edge"""] * w / h )
else:
A__ = self.size["""shortest_edge"""]
A__ = self.size["""shortest_edge"""]
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest( ImageProcessingSavingTestMixin, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = YolosImageProcessingTester(self )
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
| 335 | 0 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('''Both points must be in the same n-dimensional space''')
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    '''simple docstring'''
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        '''Expected a list of numbers as input, found '''
                        f"""{type(item).__name__}"""
                    )
                    raise TypeError(msg)
        else:
            msg = f"""Expected a list of numbers as input, found {type(point).__name__}"""
            raise TypeError(msg)
    else:
        raise ValueError('''Missing an input''')


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('''Both points must be in the same n-dimensional space''')
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
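
    # Worked example (added for illustration): |1 - 2| + |1 - 2| = 2 in both variants.
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([1, 1], [2, 2]) == 2.0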
| 43 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : Dict ): # noqa: E741
A__ = len(UpperCAmelCase_ )
A__ = 0
A__ = [0] * n
A__ = [False] * n
A__ = [False] * n
def dfs(UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
if parent == root:
out_edge_count += 1
A__ = True
A__ = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
A__ = dfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
A__ = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
A__ = True
# AP found via cycle
if at == low[to]:
A__ = True
else:
A__ = min(low[at] , UpperCAmelCase_ )
return out_edge_count
for i in range(UpperCAmelCase_ ):
if not visited[i]:
A__ = 0
A__ = dfs(UpperCAmelCase_ , UpperCAmelCase_ , -1 , UpperCAmelCase_ )
A__ = out_edge_count > 1
for x in range(len(UpperCAmelCase_ ) ):
if is_art[x] is True:
print(UpperCAmelCase_ )
# Adjacency list of graph
SCREAMING_SNAKE_CASE_ : Optional[int] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 335 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
_a : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = get_activation("""swish""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""silu""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = get_activation("""mish""" )
self.assertIsInstance(UpperCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""gelu""" )
self.assertIsInstance(UpperCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
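
# Minimal usage sketch (illustrative, not part of the test class): get_activation
# maps a string name to the torch.nn module that diffusers blocks instantiate,
# e.g. get_activation("gelu") returns an nn.GELU() instance:
#
#     act = get_activation("gelu")
#     y = act(torch.randn(4))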
| 335 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=2 , _a=24 , _a=16 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , _a=2 , _a=2 , ):
__a = parent
__a = batch_size
__a = patch_size
__a = max_length
__a = num_mel_bins
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = frequency_stride
__a = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__a = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__a = (self.max_length - self.patch_size) // self.time_stride + 1
__a = frequency_out_dimension * time_out_dimension
__a = num_patches + 2
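        # Worked numbers for the patch arithmetic above, using the tester defaults
        # (patch_size=2, num_mel_bins=16, max_length=24, both strides=2):
        #     frequency_out_dimension = (16 - 2) // 2 + 1 = 8
        #     time_out_dimension      = (24 - 2) // 2 + 1 = 12
        #     num_patches = 8 * 12 = 96, so seq_length = 96 + 2 = 98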
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, input_values, labels
def __UpperCAmelCase ( self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = ASTModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
        __a , __a , __a = config_and_inputs
__a = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def __UpperCAmelCase ( self ):
__a = ASTModelTester(self )
__a = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''input_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ASTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowercase ( ) -> int:
__a = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
__a , __a = torchaudio.load(lowerCAmelCase__ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def __UpperCAmelCase ( self ):
__a = self.default_feature_extractor
__a = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(_a )
__a = self.default_feature_extractor
__a , __a = prepare_audio()
__a = audio.squeeze().numpy()
__a = feature_extractor(_a , sampling_rate=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
# verify the logits
__a = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , _a )
__a = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
| 45 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Union[str, Any] , UpperCamelCase: Any ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = RobertaEmbeddings(UpperCamelCase )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Optional[Any] , UpperCamelCase: int ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = config.num_hidden_layers
A__ = DeeRobertaModel(UpperCamelCase )
A__ = nn.Dropout(config.hidden_dropout_prob )
A__ = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int]=None , UpperCamelCase: str=None , UpperCamelCase: str=None , UpperCamelCase: List[str]=None , UpperCamelCase: Dict=None , UpperCamelCase: List[Any]=None , UpperCamelCase: Tuple=None , UpperCamelCase: Optional[int]=-1 , UpperCamelCase: Optional[Any]=False , ):
"""simple docstring"""
A__ = self.num_layers
try:
A__ = self.roberta(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , position_ids=UpperCamelCase , head_mask=UpperCamelCase , inputs_embeds=UpperCamelCase , )
A__ = outputs[1]
A__ = self.dropout(UpperCamelCase )
A__ = self.classifier(UpperCamelCase )
A__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A__ = e.message
A__ = e.exit_layer
A__ = outputs[0]
if not self.training:
A__ = entropy(UpperCamelCase )
A__ = []
A__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
A__ = []
for highway_exit in outputs[-1]:
A__ = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase )
if train_highway:
A__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
A__ = (loss,) + outputs
if not self.training:
A__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
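# Inference-time early-exit sketch (comments only; `entropy` is imported from
# modeling_highway_bert and, mirroring DeeBERT's entropy-threshold exit, is
# presumably the Shannon entropy of the softmaxed logits):
#     probs = softmax(logits, dim=-1)
#     H = -(probs * probs.log()).sum(-1)   # low H = confident intermediate head
# A HighwayException raised inside self.roberta carries the intermediate logits
# and exit layer, which the except branch above unpacks.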
| 335 | 0 |
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : bytes ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = F'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(SCREAMING_SNAKE_CASE )
lowerCAmelCase = """""".join(bin(SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for byte in data )
lowerCAmelCase = len(SCREAMING_SNAKE_CASE ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowerCAmelCase = B"""=""" * ((6 - len(SCREAMING_SNAKE_CASE ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(SCREAMING_SNAKE_CASE ) % 6)
else:
lowerCAmelCase = B""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(SCREAMING_SNAKE_CASE ) , 6 ) ).encode()
+ padding
)
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = (
"""argument should be a bytes-like object or ASCII string, """
F'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(SCREAMING_SNAKE_CASE )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
try:
lowerCAmelCase = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
lowerCAmelCase = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(SCREAMING_SNAKE_CASE ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowerCAmelCase = encoded_data[:-padding]
lowerCAmelCase = """""".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowerCAmelCase = """""".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )
lowerCAmelCase = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(SCREAMING_SNAKE_CASE ) , 8 )
]
return bytes(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
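# Worked example for the pair of helpers above (comments only; both functions
# share an obfuscated name in this dump, so assume they are exposed as
# b64_encode / b64_decode):
#     b64_encode(b"AB") == b"QUI="
#       "AB" -> bits 01000001 01000010 (16 bits, not a multiple of 6)
#       pad with "00" -> 010000 010100 001000 -> indices 16, 20, 8 -> "QUI"
#       padding = b"=" * ((6 - 16 % 6) // 2) = b"="
#     b64_decode(b"QUI=") == b"AB", reversing the same steps.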
| 46 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
SCREAMING_SNAKE_CASE_ : int = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
SCREAMING_SNAKE_CASE_ : List[Any] = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _snake_case ( ):
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def _snake_case ( ):
A__ = """rougeLsum"""
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def _snake_case ( ):
A__ = ["""rouge1""", """rouge2""", """rougeL"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
assert score_sep == score_no_sep
def _snake_case ( ):
A__ = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
A__ = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ ) == calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ )
def _snake_case ( ):
A__ = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
A__ = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase_ )["""rougeLsum"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def _snake_case ( ):
A__ = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
A__ = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase_ )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
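# Usage sketch (comments only; keyword arguments mirror the calls in the tests
# above, and PRED/TGT stand for the two module-level example lists, whose names
# are obfuscated in this dump):
#     calculate_rouge(PRED, TGT, rouge_keys=["rouge2", "rougeL"])   # aggregated scores
#     calculate_rouge(PRED, TGT, bootstrap_aggregation=False)       # raw per-pair scores
#     calculate_rouge(PRED, TGT, newline_sep=True)                  # sentence-split rougeLsum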
| 335 | 0 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : int = "▁"
lowerCamelCase : Dict = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
lowerCamelCase : Optional[int] = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
lowerCamelCase : Dict = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
lowerCamelCase : Tuple = {
"ernie-m-base": 5_1_4,
"ernie-m-large": 5_1_4,
}
lowerCamelCase : int = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class A__ ( A__ ):
A__ = ["input_ids"]
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_INIT_CONFIGURATION
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = RESOURCE_FILES_NAMES
def __init__( self : Tuple , _a : List[Any] , _a : Dict=None , _a : Dict=False , _a : List[Any]="utf8" , _a : Dict="[UNK]" , _a : List[Any]="[SEP]" , _a : List[str]="[PAD]" , _a : int="[CLS]" , _a : Dict="[MASK]" , _a : Optional[Dict[str, Any]] = None , **_a : Tuple , ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , vocab_file=_a , encoding=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_SCREAMING_SNAKE_CASE =do_lower_case
_SCREAMING_SNAKE_CASE =sentencepiece_model_ckpt
_SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
_SCREAMING_SNAKE_CASE =self.load_vocab(filepath=_a )
else:
_SCREAMING_SNAKE_CASE ={self.sp_model.id_to_piece(_a ): id for id in range(self.sp_model.get_piece_size() )}
_SCREAMING_SNAKE_CASE ={v: k for k, v in self.vocab.items()}
def A ( self : Optional[int] , _a : Any ) -> List[Any]:
'''simple docstring'''
if text is None:
return None
_SCREAMING_SNAKE_CASE =self.tokenize(_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ='', []
for i, ch in enumerate(_a ):
if ch in self.SP_CHAR_MAPPING:
_SCREAMING_SNAKE_CASE =self.SP_CHAR_MAPPING.get(_a )
else:
_SCREAMING_SNAKE_CASE =unicodedata.normalize('NFKC' , _a )
if self.is_whitespace(_a ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_a ) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =normalized_text, [], 0
if self.do_lower_case:
_SCREAMING_SNAKE_CASE =text.lower()
for token in split_tokens:
if token[:1] == "▁":
_SCREAMING_SNAKE_CASE =token[1:]
_SCREAMING_SNAKE_CASE =text[offset:].index(_a ) + offset
_SCREAMING_SNAKE_CASE =start + len(_a )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
_SCREAMING_SNAKE_CASE =end
return token_mapping
@property
def A ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.vocab )
def A ( self : int ) -> Optional[Any]:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.__dict__.copy()
_SCREAMING_SNAKE_CASE =None
return state
def __setstate__( self : Any , _a : List[Any] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def A ( self : Any , _a : int ) -> Optional[Any]:
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(_a , _a ) for c in text) )
def A ( self : List[Any] , _a : int , _a : Optional[int]=False , _a : Any=64 , _a : List[str]=0.1 ) -> Optional[Any]:
'''simple docstring'''
if self.sp_model_kwargs.get('enable_sampling' ) is True:
_SCREAMING_SNAKE_CASE =True
if self.sp_model_kwargs.get('alpha' ) is not None:
_SCREAMING_SNAKE_CASE =self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
_SCREAMING_SNAKE_CASE =self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
_SCREAMING_SNAKE_CASE =self.sp_model.EncodeAsPieces(_a )
else:
_SCREAMING_SNAKE_CASE =self.sp_model.SampleEncodeAsPieces(_a , _a , _a )
_SCREAMING_SNAKE_CASE =[]
for pi, piece in enumerate(_a ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_a ) and pi != 0:
new_pieces.append(_a )
continue
else:
continue
_SCREAMING_SNAKE_CASE =0
for i, chunk in enumerate(_a ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_a ) or self.is_punct(_a ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_a )
_SCREAMING_SNAKE_CASE =i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_SCREAMING_SNAKE_CASE =i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_SCREAMING_SNAKE_CASE =i
if len(_a ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def A ( self : List[str] , _a : List[str] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =''.join(_a ).replace(_a , ' ' ).strip()
return out_string
def A ( self : str , _a : int ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.convert_ids_to_tokens(_a )
_SCREAMING_SNAKE_CASE =''.join(_a ).replace(_a , ' ' ).strip()
return out_string
def A ( self : List[str] , _a : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return self.vocab.get(_a , self.vocab.get(self.unk_token ) )
def A ( self : List[Any] , _a : Optional[Any] ) -> Any:
'''simple docstring'''
return self.reverse_vocab.get(_a , self.unk_token )
def A ( self : Optional[Any] , _a : Union[str, Any] , _a : Dict=None ) -> Optional[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def A ( self : List[Any] , _a : Dict , _a : Optional[Any]=None ) -> Optional[int]:
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def A ( self : int , _a : Tuple , _a : Optional[int]=None , _a : Tuple=False ) -> List[Any]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1]
def A ( self : List[str] , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_a ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_a ) + 1) + [1] * (len(_a ) + 3)
def A ( self : List[Any] , _a : str ) -> Optional[int]:
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def A ( self : Tuple , _a : Tuple ) -> Optional[Any]:
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def A ( self : Tuple , _a : Optional[int] ) -> Any:
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def A ( self : str , _a : int ) -> List[Any]:
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_a ) == 1:
_SCREAMING_SNAKE_CASE =unicodedata.category(_a )
if cat == "Zs":
return True
return False
def A ( self : str , _a : List[Any] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ={}
with io.open(_a , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_a ):
_SCREAMING_SNAKE_CASE =line.rstrip('\n' )
_SCREAMING_SNAKE_CASE =int(_a )
return token_to_idx
def A ( self : str , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =0
if os.path.isdir(_a ):
_SCREAMING_SNAKE_CASE =os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
_SCREAMING_SNAKE_CASE =(filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_a , 'w' , encoding='utf-8' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _a : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
' Please check that the vocabulary is not corrupted!' )
_SCREAMING_SNAKE_CASE =token_index
writer.write(token + '\n' )
index += 1
_SCREAMING_SNAKE_CASE =os.path.join(_a , 'sentencepiece.bpe.model' )
with open(_a , 'wb' ) as fi:
_SCREAMING_SNAKE_CASE =self.sp_model.serialized_model_proto()
fi.write(_a )
return (vocab_file,)
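# Worked example for the special-token layout built above (comments only; method
# names follow the standard tokenizer API, since they are all obfuscated to `A`
# in this dump): for a pair the sequence is [CLS] A [SEP] [SEP] B [SEP], so with
# len(A) = 2 and len(B) = 3 there are 2 + 3 + 4 = 9 ids and the token type ids
# are [0] * (2 + 1) + [1] * (3 + 3) = [0, 0, 0, 1, 1, 1, 1, 1, 1]:
# segment 0 covers [CLS] A, segment 1 covers [SEP] [SEP] B [SEP].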
| 47 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'MobileNetV1Config'
# Base docstring
SCREAMING_SNAKE_CASE_ : str = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : List[str] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : Tuple = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=None ):
A__ = {}
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = model.mobilenet_va
else:
A__ = model
A__ = """MobilenetV1/Conv2d_0/"""
A__ = backbone.conv_stem.convolution.weight
A__ = backbone.conv_stem.normalization.bias
A__ = backbone.conv_stem.normalization.weight
A__ = backbone.conv_stem.normalization.running_mean
A__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
A__ = i + 1
A__ = i * 2
A__ = backbone.layer[pt_index]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
A__ = backbone.layer[pt_index + 1]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
A__ = model.classifier.weight
A__ = model.classifier.bias
return tf_to_pt_map
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
A__ = tf.train.list_variables(UpperCAmelCase_ )
A__ = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
A__ = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = array
# Build TF to PyTorch weights loading map
A__ = _build_tf_to_pytorch_map(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
A__ = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
A__ = np.transpose(UpperCAmelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
A__ = array.squeeze().transpose()
else:
A__ = np.transpose(UpperCAmelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
A__ = torch.from_numpy(UpperCAmelCase_ )
tf_weights.pop(UpperCAmelCase_ , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp""" , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp_1""" , UpperCAmelCase_ )
tf_weights.pop(name + """/ExponentialMovingAverage""" , UpperCAmelCase_ )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def _snake_case ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : nn.Convad ):
A__ , A__ = features.shape[-2:]
A__ , A__ = conv_layer.stride
A__ , A__ = conv_layer.kernel_size
if in_height % stride_height == 0:
A__ = max(kernel_height - stride_height , 0 )
else:
A__ = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
A__ = max(kernel_width - stride_width , 0 )
else:
A__ = max(kernel_width - (in_width % stride_width) , 0 )
A__ = pad_along_width // 2
A__ = pad_along_width - pad_left
A__ = pad_along_height // 2
A__ = pad_along_height - pad_top
A__ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(UpperCAmelCase_ , UpperCAmelCase_ , """constant""" , 0.0 )
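# Worked example for apply_tf_padding above (comments only), which reproduces
# TensorFlow "SAME" padding: with in_height = 7, stride_height = 2, kernel = 3,
#     7 % 2 = 1  ->  pad_along_height = max(3 - 1, 0) = 2
#     pad_top = 2 // 2 = 1, pad_bottom = 2 - 1 = 1
# so the 7-row input becomes 9 rows and the convolution outputs ceil(7 / 2) = 4.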
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: bool = False , UpperCamelCase: Optional[bool] = True , UpperCamelCase: Optional[bool or str] = True , ):
"""simple docstring"""
super().__init__()
A__ = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
A__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
A__ = nn.Convad(
in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=UpperCamelCase , stride=UpperCamelCase , padding=UpperCamelCase , groups=UpperCamelCase , bias=UpperCamelCase , padding_mode="""zeros""" , )
if use_normalization:
A__ = nn.BatchNormad(
num_features=UpperCamelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCamelCase , track_running_stats=UpperCamelCase , )
else:
A__ = None
if use_activation:
if isinstance(UpperCamelCase , UpperCamelCase ):
A__ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCamelCase ):
A__ = ACTaFN[config.hidden_act]
else:
A__ = config.hidden_act
else:
A__ = None
def UpperCamelCase ( self: List[Any] , UpperCamelCase: torch.Tensor ):
"""simple docstring"""
if self.config.tf_padding:
A__ = apply_tf_padding(UpperCamelCase , self.convolution )
A__ = self.convolution(UpperCamelCase )
if self.normalization is not None:
A__ = self.normalization(UpperCamelCase )
if self.activation is not None:
A__ = self.activation(UpperCamelCase )
return features
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = MobileNetVaConfig
UpperCAmelCase = load_tf_weights_in_mobilenet_va
UpperCAmelCase = "mobilenet_v1"
UpperCAmelCase = "pixel_values"
UpperCAmelCase = False
def UpperCamelCase ( self: Any , UpperCamelCase: Union[nn.Linear, nn.Convad] ):
"""simple docstring"""
if isinstance(UpperCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: bool = True ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config
A__ = 32
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
A__ = MobileNetVaConvLayer(
UpperCamelCase , in_channels=config.num_channels , out_channels=UpperCamelCase , kernel_size=3 , stride=2 , )
A__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
A__ = nn.ModuleList()
for i in range(13 ):
A__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=1 , ) )
A__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A__ = self.conv_stem(UpperCamelCase )
A__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
A__ = layer_module(UpperCamelCase )
if output_hidden_states:
A__ = all_hidden_states + (hidden_states,)
A__ = hidden_states
if self.pooler is not None:
A__ = torch.flatten(self.pooler(UpperCamelCase ) , start_dim=1 )
else:
A__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=UpperCamelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = MobileNetVaModel(UpperCamelCase )
A__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
A__ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.mobilenet_va(UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier(self.dropout(UpperCamelCase ) )
A__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ = """single_label_classification"""
else:
A__ = """multi_label_classification"""
if self.config.problem_type == "regression":
A__ = MSELoss()
if self.num_labels == 1:
A__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ = BCEWithLogitsLoss()
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
if not return_dict:
A__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , )
| 335 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE__ : Tuple = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE__ : int = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE__ : Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE__ : str = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
SCREAMING_SNAKE_CASE__ : Dict = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
SCREAMING_SNAKE_CASE__ : int = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
SCREAMING_SNAKE_CASE__ : List[str] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[int] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : Dict = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : Optional[Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : Optional[Any] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
SCREAMING_SNAKE_CASE__ : Tuple = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
SCREAMING_SNAKE_CASE__ : Tuple = r'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(lowerCAmelCase__ )
class UpperCamelCase__ :
'''simple docstring'''
def __call__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
elif titles is None or texts is None:
lowerCamelCase : Optional[Any] = titles if texts is None else texts
return super().__call__(
UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
lowerCamelCase : Union[str, Any] = titles if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [titles]
lowerCamelCase : int = texts if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [texts]
lowerCamelCase : List[Any] = len(UpperCamelCase__ )
lowerCamelCase : int = questions if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [questions] * n_passages
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
F'''There should be as many titles than texts but got {len(UpperCamelCase__ )} titles and {len(UpperCamelCase__ )} texts.''' )
lowerCamelCase : Any = super().__call__(UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )["input_ids"]
lowerCamelCase : Any = super().__call__(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )["input_ids"]
lowerCamelCase : int = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCamelCase__ , UpperCamelCase__ )
]
}
if return_attention_mask is not False:
lowerCamelCase : List[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase : List[str] = attention_mask
return self.pad(UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 16 , UpperCamelCase__ = 64 , UpperCamelCase__ = 4 , ) -> List[DPRSpanPrediction]:
lowerCamelCase : Any = reader_input["input_ids"]
lowerCamelCase , lowerCamelCase , lowerCamelCase : Dict = reader_output[:3]
lowerCamelCase : str = len(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = sorted(range(UpperCamelCase__ ) , reverse=UpperCamelCase__ , key=relevance_logits.__getitem__ )
lowerCamelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
lowerCamelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase : Union[str, Any] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase : Any = sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase : Tuple = len(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase__ , top_spans=UpperCamelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase__ , start_index=UpperCamelCase__ , end_index=UpperCamelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(UpperCamelCase__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> List[DPRSpanPrediction]:
lowerCamelCase : Any = []
for start_index, start_score in enumerate(UpperCamelCase__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCamelCase : List[Any] = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] , reverse=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
lowerCamelCase : Any = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCamelCase__ ) == top_spans:
break
return chosen_span_intervals
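    # Worked example for _get_best_spans above (comments only): with
    #     start_logits = [1.0, 2.0], end_logits = [3.0, 1.0], max_answer_length = 2,
    # the candidate spans score (0, 0): 4.0, (0, 1): 2.0 and (1, 1): 3.0, so the
    # descending order is (0, 0), (1, 1), (0, 1); (0, 1) is then skipped by the
    # overlap check because it contains the already-chosen (0, 0).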
@add_end_docstrings(lowerCAmelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Tuple = VOCAB_FILES_NAMES
lowerCamelCase_ : List[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : Tuple = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase_ : List[str] = ["""input_ids""", """attention_mask"""]
| 48 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : list[list[int]] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : set ):
A__ , A__ = len(UpperCAmelCase_ ), len(grid[0] )
if (
min(UpperCAmelCase_ , UpperCAmelCase_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A__ = 0
count += depth_first_search(UpperCAmelCase_ , row + 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , row - 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col + 1 , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col - 1 , UpperCAmelCase_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
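# Worked example (comments only; the def name is obfuscated above, but the
# recursive calls keep the original name depth_first_search):
#     depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2   # right-down, down-right
#     depth_first_search([[0, 1], [0, 0]], 0, 0, set()) == 1   # wall at (0, 1)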
| 335 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case :Optional[Any] = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :str = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__snake_case :Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 49 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
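# Note (illustrative, not part of the original module): the `_LazyModule`
# indirection above means that importing a symbol such as MegatronBertModel
# only triggers the torch-dependent import the first time it is accessed.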
| 335 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mask-weighted mean pooling over token embeddings, then a linear
        # projection into the shared CLIP embedding space.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
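# Hedged usage sketch (the checkpoint name is an assumption, not from this file):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   batch = tokenizer(["a photo of a cat"], return_tensors="pt", padding=True)
#   projected, token_embs = model(batch["input_ids"], batch["attention_mask"])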
| 50 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
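# Hedged illustration (mirrors one of the parametrized cases above): splitting
# shard kwargs across two jobs partitions the `shards` list evenly.
#
#   _split_gen_kwargs({"shards": [0, 1, 2, 3]}, 2)
#   # -> [{"shards": [0, 1]}, {"shards": [2, 3]}]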
| 335 | 0 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build one row of the triangle from the rows already computed."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Each inner element is the sum of the two elements directly above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle exploiting the left/right symmetry of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both triangle generators over a range of inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
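    # Hedged sanity check (illustrative): both generators agree on small inputs.
    assert (
        generate_pascal_triangle(4)
        == generate_pascal_triangle_optimized(4)
        == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
    )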
| 51 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or an already-parsed `Version`) against a
    requirement, using a string operation such as ">=" or "<"."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the installed torch version against a reference version."""
    return compare_versions(torch_version, operation, version)
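# Hedged usage sketch (illustrative): gate a code path on the installed torch.
#
#   if is_torch_version(">=", "1.12.0"):
#       ...  # use a feature introduced in torch 1.12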
| 335 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str) -> None:
    """Export a PyTorch BertModel state dict as a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str) -> str:
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None) -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
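# Hedged CLI example (the script name and paths are assumptions):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt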
| 52 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n    predictions (list of str): The predicted sentences.\n    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n    char_order (int): Character n-gram order. Defaults to `6`.\n    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n    eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n    to reference chrF++.py, NLTK and Moses implementations. If `False`,\n    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n    \'score\' (float): The chrF (chrF++) score,\n    \'char_order\' (int): The character n-gram order,\n    \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n    \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n    Example 1--a simple example of calculating chrF:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction, references=reference)\n        >>> print(results)\n        {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction,\n        ...                         references=reference,\n        ...                         word_order=2)\n        >>> print(results)\n        {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction,\n        ...                         references=reference,\n        ...                         word_order=2,\n        ...                         lowercase=True)\n        >>> print(results)\n        {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 335 | 0 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]
def test_custom_files_are_present(transformers_path: Path) -> bool:
    """Check that all the expected custom (non-Python) files ship with the package."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
a__ : Tuple =argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
a__ : Union[str, Any] =parser.parse_args()
if args.check_lib:
a__ : Optional[int] =importlib.import_module('''transformers''')
a__ : List[str] =Path(transformers_module.__file__).parent
else:
a__ : Tuple =Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
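# Hedged CLI examples (the script name is an assumption):
#
#   python check_build.py               # check the local build/lib/transformers
#   python check_build.py --check_lib   # check the installed transformers package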
| 53 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
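# Hedged usage sketch (argument names are assumptions for illustration):
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=raw_eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=64, num_beams=4)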
| 335 | 0 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 54 |
"""simple docstring"""
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        """Print each vertex followed by its adjacency list."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """Run a depth-first search covering every vertex."""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the current vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 335 | 0 |
'''simple docstring'''
a_ : str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
a_ : Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
a_ : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 55 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last `n` digits of NUMBER = 28433 * 2 ** 7830457 + 1
    (Project Euler problem 97), using modular exponentiation."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("""Invalid input""")
    modulus = 10**n
    number = 2_8433 * (pow(2, 783_0457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
| 335 | 0 |
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
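# Hedged usage sketch (illustrative): compose an ALIGN config from sub-configs.
#
#   text_cfg = AlignTextConfig()
#   vision_cfg = AlignVisionConfig()
#   cfg = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert cfg.to_dict()["model_type"] == "align"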
| 56 |
"""simple docstring"""
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]


def create_vector(end_point1: Pointad, end_point2: Pointad) -> Vectorad:
    """Create the vector that points from `end_point1` to `end_point2`."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    """Cross product of two 3d vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    """A vector is the zero vector if every component rounds to 0."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Pointad, b: Pointad, c: Pointad, accuracy: int = 10) -> bool:
    """Three points are collinear iff AB x AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
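if __name__ == "__main__":
    # Hedged examples (illustrative): points on the line x = y = z are collinear.
    assert are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0))
    assert not are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))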
| 335 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[1_28, 2_56, 3_84],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.0_2,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO test fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
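# Hedged note (the path is an assumption about the repository layout): run the
# fast tests in this module with, e.g.,
#
#   python -m pytest tests/models/levit/test_modeling_levit.py -k "not slow"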
| 57 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 335 | 0 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: return True if `pattern` occurs in `text`,
    in O(len(text) + len(pattern)) time."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """For each prefix of `pattern`, store the length of its longest proper
    prefix that is also a suffix (the KMP failure function)."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
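    # Worked example (illustrative): for "aabaabaaa" the prefix "aabaa" (index 4)
    # has failure value 2, because its suffix "aa" matches its prefix "aa".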
| 58 |
"""simple docstring"""
import math
class Graph:
    def __init__(self, n: int = 0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u: int, v: int, w: int) -> None:
        self.dp[u][v] = w

    def floyd_warshall(self) -> None:
        """Relax every pair of vertices through every intermediate vertex k."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u: int, v: int):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
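    # With the edges above, the expected shortest distances are
    # 1 -> 4 = 11 (via vertex 3) and 0 -> 3 = 16 (via vertex 2).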
| 335 | 0 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet", )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet_class_cond", )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if isinstance(device, str):
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
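
    # Usage sketch (added for illustration, not part of the test suite): the slow
    # tests above exercise the public "diffusers/consistency_models" checkpoint.
    # A minimal standalone run, assuming a CUDA device is available, would be:
    #
    #     unet = UNet2DModel.from_pretrained(
    #         "diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2"
    #     )
    #     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
    #     pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
    #     image = pipe(num_inference_steps=1, timesteps=None).images[0]  # one-step sampling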
| 59 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decode_strs = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs_tok = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decode_strs, decode_strs_tok)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
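
    # Usage sketch outside the tests (added for illustration; assumes the public
    # "alibaba-damo/mgp-str-base" checkpoint and a PIL `image`):
    #
    #     from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
    #     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    #     model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
    #     pixel_values = processor(images=image, return_tensors="pt").pixel_values
    #     outputs = model(pixel_values)
    #     text = processor.batch_decode(outputs.logits)["generated_text"]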
| 335 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the backend normalizer disagrees with the requested options,
        # rebuild it so the fast tokenizer matches the configuration.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
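
# Example sketch of the two helpers above (added for illustration; assumes the
# "bert-base-uncased" checkpoint can be downloaded, token ids shown schematically):
#
#     tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     ids_a = tokenizer.encode("hello", add_special_tokens=False)
#     ids_b = tokenizer.encode("world", add_special_tokens=False)
#     tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
#     # -> [CLS] + ids_a + [SEP] + ids_b + [SEP]
#     tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
#     # -> [0] * len([CLS] + ids_a + [SEP]) + [1] * len(ids_b + [SEP])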
| 60 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    # Malus's law: transmitted intensity I = I0 * cos^2(theta)
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
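
    # Worked example (added for illustration): with I0 = 100.0 and a 60 degree
    # angle between polarizer and analyzer, cos(60°) = 0.5, so the transmitted
    # intensity is 100.0 * 0.5**2 = 25.0.
    assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9
    assert malus_law(100.0, 0.0) == 100.0  # aligned polarizers transmit everything
    assert malus_law(100.0, 90.0) < 1e-9  # crossed polarizers block (almost) everything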
| 335 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started unrolling the first item yet
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 61 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2,
            output_type="np", image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2,
            output_type="np", image=init_image, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5,
            generator=generator, output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5,
            generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 335 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_invalid_num_proc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
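
    # Round-trip sketch (added for illustration, assumes a `datasets.Dataset`):
    #
    #     import io
    #     from datasets import Dataset
    #     from datasets.io.json import JsonDatasetWriter
    #
    #     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    #     buf = io.BytesIO()
    #     JsonDatasetWriter(ds, buf, lines=True).write()
    #     buf.seek(0)
    #     print(buf.read().decode())  # one JSON object per line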
| 62 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    # overly long lock filenames are truncated but keep the ".lock" suffix
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
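
# Usage sketch (added for illustration): FileLock serializes access to a shared
# resource across processes; the context manager blocks until the OS-level lock
# is released. The filename below is illustrative.
#
#     with FileLock("shared_resource.lock"):
#         ...  # only one process at a time executes this block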
| 335 | 0 |
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` using standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
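    # Sanity checks (added for illustration): 2 pence can be made as {1+1, 2},
    # i.e. 2 ways; 5 pence as {1x5, 1+1+1+2, 1+2+2, 5}, i.e. 4 ways.
    assert solution(2) == 2
    assert solution(5) == 4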
| 63 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
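
# Usage sketch (added for illustration; assumes the `vision` extra is installed
# and the local image path is illustrative):
#
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(image=Image.open("cat.png"), question="What animal is this?")
#     print(answer)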
| 335 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class lowercase( __a , __a ):
'''simple docstring'''
@register_to_config
def __init__( self: List[str], a_: int = 1_000, a_: str = "fixed_small_log", a_: bool = True, a_: Optional[float] = 1.0, a_: str = "epsilon", a_: str = "squaredcos_cap_v2", ):
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" )
_snake_case : Tuple = betas_for_alpha_bar(a_ )
_snake_case : List[str] = 1.0 - self.betas
_snake_case : Optional[int] = torch.cumprod(self.alphas, dim=0 )
_snake_case : Tuple = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
_snake_case : Optional[Any] = 1.0
# setable values
_snake_case : str = None
_snake_case : Tuple = torch.from_numpy(np.arange(0, a_ )[::-1].copy() )
_snake_case : int = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        '''simple docstring'''
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
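
    # Worked example (derived from the formula above, not added behavior): with the
    # default 1_000 training timesteps, set_timesteps(4) yields
    # round([0, 1, 2, 3] * 999 / 3)[::-1] == [999, 666, 333, 0].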
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        '''simple docstring'''
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        '''simple docstring'''
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ):
        '''simple docstring'''
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
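
# Minimal usage sketch (illustrative only; `unet` is a hypothetical denoiser here, and
# the real consumer of this scheduler is diffusers' UnCLIP pipeline):
#
#     scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
#     scheduler.set_timesteps(25)
#     sample = torch.randn(1, 3, 64, 64)
#     for i, t in enumerate(scheduler.timesteps):
#         model_output = unet(sample, t)
#         prev = None if i + 1 == len(scheduler.timesteps) else scheduler.timesteps[i + 1]
#         sample = scheduler.step(model_output, t, sample, prev_timestep=prev).prev_sample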
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
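
    # Example of the resize math above (derived from this helper, not added behavior):
    # a 400 x 30 (w x h) image with shortest_edge=18 keeps its aspect ratio and maps
    # to expected_height=18, expected_width=int(18 * 400 / 30) == 240.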
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        """simple docstring"""
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """simple docstring"""
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """simple docstring"""
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
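
# These integration checks can be run in isolation with pytest, e.g. (the path is
# illustrative and depends on where this file lives in the repo; RUN_SLOW=1 enables
# the @slow-decorated tests):
#
#     RUN_SLOW=1 pytest tests/models/yolos/test_image_processing_yolos.py -k coco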
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
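
# For the adjacency list above, this prints the cut vertices
#     2
#     3
#     5
# (removing 2 splits off {0, 1}, removing 3 isolates 4, and removing 5 isolates
# {6, 7, 8}).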
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Train a masked language model on TPU.""")
    parser.add_argument(
        """--pretrained_model_config""", type=str, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
    parser.add_argument(
        """--tokenizer""", type=str, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )

    parser.add_argument(
        """--per_replica_batch_size""", type=int, default=8, help="""Batch size per TPU core.""", )

    parser.add_argument(
        """--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )

    parser.add_argument(
        """--tpu_name""", type=str, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )

    parser.add_argument(
        """--tpu_zone""", type=str, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )

    parser.add_argument(
        """--gcp_project""", type=str, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )

    parser.add_argument(
        """--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )

    parser.add_argument(
        """--train_dataset""", type=str, help="""Path to training dataset to load. If the path begins with `gs://`"""
        """ then the dataset will be loaded from a Google Cloud Storage bucket.""", )

    parser.add_argument(
        """--shuffle_buffer_size""", type=int, default=2**18, help="""Size of the shuffle buffer (in samples)""", )

    parser.add_argument(
        """--eval_dataset""", type=str, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
        """ then the dataset will be loaded from a Google Cloud Storage bucket.""", )

    parser.add_argument(
        """--num_epochs""", type=int, default=1, help="""Number of epochs to train for.""", )

    parser.add_argument(
        """--learning_rate""", type=float, default=1e-4, help="""Learning rate to use for training.""", )

    parser.add_argument(
        """--weight_decay_rate""", type=float, default=1e-3, help="""Weight decay rate to use for training.""", )

    parser.add_argument(
        """--max_length""", type=int, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )

    parser.add_argument(
        """--mlm_probability""", type=float, default=0.15, help="""Fraction of tokens to mask during training.""", )

    parser.add_argument("""--output_dir""", type=str, required=True, help="""Path to save model checkpoints to.""" )
    parser.add_argument("""--hub_model_id""", type=str, help="""Model ID to upload to on the Hugging Face Hub.""" )

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    '''simple docstring'''
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
            """--gcp_project. When running on a TPU VM, use --tpu_name local.""" )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    '''simple docstring'''
    num_samples = 0
    for file in file_list:
        filename = file.split("""/""")[-1]
        sample_count = re.search(r"""-\d+-(\d+)\.tfrecord""", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
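
# Example (hypothetical shard name matching the pattern above): a file named
# "wikitext-0-2048.tfrecord" contributes int("2048") == 2048 samples to the total.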
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    '''simple docstring'''
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    '''simple docstring'''
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="""/gpu:0""")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord"""))
    if not training_records:
        raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord"""))
    if not eval_records:
        raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["""accuracy"""])

    def decode_fn(example):
        features = {
            """input_ids""": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            """attention_mask""": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="""tf""")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["""attention_mask"""], tf.bool)
            | (batch["""input_ids"""] == tokenizer.cls_token_id)
            | (batch["""input_ids"""] == tokenizer.sep_token_id)
        )
        batch["""input_ids"""], batch["""labels"""] = data_collator.tf_mask_tokens(
            batch["""input_ids"""], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
    main(args)
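
# Example invocation (illustrative values; the script name depends on how this file
# is saved):
#
#     python run_mlm.py --train_dataset gs://my-bucket/train/ --eval_dataset gs://my-bucket/eval/ \
#         --tokenizer unigram-tokenizer-wikitext --pretrained_model_config roberta-base \
#         --output_dir ./mlm-tpu --num_epochs 1 --bfloat16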
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    """simple docstring"""

    def test_swish(self):
        """simple docstring"""
        act = get_activation("""swish""")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        """simple docstring"""
        act = get_activation("""silu""")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        """simple docstring"""
        act = get_activation("""mish""")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        """simple docstring"""
        act = get_activation("""gelu""")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
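
# For reference, the helper under test maps activation names to torch.nn modules,
# e.g. (illustrative):
#
#     act = get_activation("gelu")
#     y = act(torch.linspace(-3, 3, steps=7))   # smooth curve, ~0 for very negative x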
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = NllbTokenizer(a , keep_accents=a )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase = self.tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = tokenizer_r.save_pretrained(a )
__lowerCamelCase = tokenizer_p.save_pretrained(a )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__lowerCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(a , a )
# Checks everything loads correctly in the same way
__lowerCamelCase = tokenizer_r.from_pretrained(a )
__lowerCamelCase = tokenizer_p.from_pretrained(a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a , a ) )
shutil.rmtree(a )
# Save tokenizer rust, legacy_format=True
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = tokenizer_r.save_pretrained(a , legacy_format=a )
__lowerCamelCase = tokenizer_p.save_pretrained(a )
# Checks it save with the same files
self.assertSequenceEqual(a , a )
# Checks everything loads correctly in the same way
__lowerCamelCase = tokenizer_r.from_pretrained(a )
__lowerCamelCase = tokenizer_p.from_pretrained(a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a , a ) )
shutil.rmtree(a )
# Save tokenizer rust, legacy_format=False
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = tokenizer_r.save_pretrained(a , legacy_format=a )
__lowerCamelCase = tokenizer_p.save_pretrained(a )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCamelCase = tokenizer_r.from_pretrained(a )
__lowerCamelCase = tokenizer_p.from_pretrained(a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a , a ) )
shutil.rmtree(a )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
if not self.test_seqaseq:
return
__lowerCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
__lowerCamelCase = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
__lowerCamelCase = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
__lowerCamelCase = tokenizer.prepare_seqaseq_batch(
src_texts=a , tgt_texts=a , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__lowerCamelCase = tokenizer.prepare_seqaseq_batch(
a , tgt_texts=a , max_length=3 , return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__lowerCamelCase = tokenizer.prepare_seqaseq_batch(
src_texts=a , max_length=3 , max_target_length=10 , return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('''decoder_input_ids''' , a )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase = [AddedToken('''<special>''' , lstrip=a )]
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , additional_special_tokens=a , **a )
__lowerCamelCase = tokenizer_r.encode('''Hey this is a <special> token''' )
__lowerCamelCase = tokenizer_r.encode('''<special>''' , add_special_tokens=a )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , additional_special_tokens=a , **a , )
__lowerCamelCase = self.tokenizer_class.from_pretrained(
a , additional_special_tokens=a , **a )
__lowerCamelCase = tokenizer_p.encode('''Hey this is a <special> token''' )
__lowerCamelCase = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(a , a )
self.assertEqual(a , a )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 25_60_57 )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIn(a , self.tokenizer.all_special_ids )
# fmt: off
__lowerCamelCase = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
__lowerCamelCase = self.tokenizer.decode(a , skip_special_tokens=a )
__lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , a )
__lowerCamelCase = 10
__lowerCamelCase = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , a )
self.assertEqual(len(a ) , a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_62_03, 3] )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a )
__lowerCamelCase = NllbTokenizer.from_pretrained(a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__lowerCamelCase = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(a , a )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a )
self.assertEqual(a , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors='''pt''' )
__lowerCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=a , truncation=a , max_length=10 , return_tensors='''pt''' )
__lowerCamelCase = targets['''input_ids''']
__lowerCamelCase = shift_tokens_right(
a , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(a ) , {
# A, test, EOS, en_XX
'''input_ids''': [[25_60_47, 70, 73_56, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_60_57,
} , )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = True
__lowerCamelCase = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
__lowerCamelCase = False
__lowerCamelCase = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    """simple docstring"""

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    """simple docstring"""

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        """simple docstring"""
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
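
# Sketch of how the two training passes are typically driven (an illustration, not
# code from the original repo): one pass trains the final classifier, a second pass
# with train_highway=True trains the intermediate exit ramps.
#
#     outputs = model(input_ids, attention_mask=mask, labels=labels, train_highway=True)
#     loss = outputs[0]   # sum of the highway losses, excluding the final ramp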
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
__version__ = "3.0.12"


_logger = None
def logger():
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """simple docstring"""

    def __init__(self, lock_file):
        '''simple docstring'''
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        '''simple docstring'''
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    """simple docstring"""

    def __init__(self, lock):
        '''simple docstring'''
        self.lock = lock
        return None

    def __enter__(self):
        '''simple docstring'''
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        '''simple docstring'''
        self.lock.release()
        return None
class BaseFileLock:
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        '''simple docstring'''
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        '''simple docstring'''
        return self._lock_file

    @property
    def timeout(self):
        '''simple docstring'''
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        '''simple docstring'''
        self._timeout = float(value)
        return None

    def _acquire(self):
        '''simple docstring'''
        raise NotImplementedError()

    def _release(self):
        '''simple docstring'''
        raise NotImplementedError()

    @property
    def is_locked(self):
        '''simple docstring'''
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        '''simple docstring'''
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        '''simple docstring'''
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        '''simple docstring'''
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        '''simple docstring'''
        self.release()
        return None

    def __del__(self):
        '''simple docstring'''
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        '''simple docstring'''
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        '''simple docstring'''
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        '''simple docstring'''
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock( snake_case ):
    """Simply watches the existence of the lock file; no OS-level locking."""
    def _acquire( self ) -> None:
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ) -> None:
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCAmelCase__ = None
if msvcrt:
lowerCAmelCase__ = WindowsFileLock
elif fcntl:
lowerCAmelCase__ = UnixFileLock
else:
lowerCAmelCase__ = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
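# Usage sketch (a minimal example, assuming the platform-appropriate class
# selected above; the lock path is illustrative). The context-manager protocol
# defined earlier makes acquire/release automatic:
#
#     lock = lowerCAmelCase__("""app.lock""", timeout=5)
#     with lock:
#         pass  # critical section; `Timeout` is raised if acquisition takes > 5s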
| 68 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["""rouge2""", """rougeL"""] )
    assert isinstance(no_aggregation , defaultdict )
    no_aggregation_just_ra = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["""rouge2"""] )
    assert (
        pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = """rougeLsum"""
    score = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=[k] )[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["""rouge1""", """rouge2""", """rougeL"""]
    score_sep = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=k )
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=k )
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    tgt = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(pred , tgt , newline_sep=True ) == calculate_rouge(pred , tgt , newline_sep=False )
def test_pegasus_newline():
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred , tgt , rouge_keys=["""rougeLsum"""] , newline_sep=False )["""rougeLsum"""]
    new_score = calculate_rouge(pred , tgt , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
    metrics = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
    assert isinstance(metrics , dict )
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=False )
    assert isinstance(metrics_default_dict , defaultdict )
| 335 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''allegro/herbert-base-cased''': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs, ) -> None:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs, )
    def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
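# Usage sketch for the helpers above (checkpoint name taken from the vocab map
# in this module; token ids are illustrative):
#
#     tokenizer = HerbertTokenizerFast.from_pretrained("""allegro/herbert-base-cased""")
#     tokenizer.build_inputs_with_special_tokens([5, 6])       # -> [CLS] 5 6 [SEP]
#     tokenizer.build_inputs_with_special_tokens([5, 6], [7])  # -> [CLS] 5 6 [SEP] 7 [SEP]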
| 69 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'MobileNetV1Config'
# Base docstring
_CHECKPOINT_FOR_DOC = 'google/mobilenet_v1_1.0_224'
_EXPECTED_OUTPUT_SHAPE = [1, 1_0_2_4, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'google/mobilenet_v1_1.0_224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map( model , config , tf_weights=None ):
    """Build a map of TF checkpoint variable names to PyTorch parameters."""
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification ):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = """MobilenetV1/Conv2d_0/"""
    tf_to_pt_map[prefix + """weights"""] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + """BatchNorm/beta"""] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + """BatchNorm/gamma"""] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + """BatchNorm/moving_mean"""] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + """BatchNorm/moving_variance"""] = backbone.conv_stem.normalization.running_var
    for i in range(13 ):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
        tf_to_pt_map[prefix + """depthwise_weights"""] = pointer.convolution.weight
        tf_to_pt_map[prefix + """BatchNorm/beta"""] = pointer.normalization.bias
        tf_to_pt_map[prefix + """BatchNorm/gamma"""] = pointer.normalization.weight
        tf_to_pt_map[prefix + """BatchNorm/moving_mean"""] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + """BatchNorm/moving_variance"""] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
        tf_to_pt_map[prefix + """weights"""] = pointer.convolution.weight
        tf_to_pt_map[prefix + """BatchNorm/beta"""] = pointer.normalization.bias
        tf_to_pt_map[prefix + """BatchNorm/gamma"""] = pointer.normalization.weight
        tf_to_pt_map[prefix + """BatchNorm/moving_mean"""] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + """BatchNorm/moving_variance"""] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification ):
        prefix = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
        tf_to_pt_map[prefix + """weights"""] = model.classifier.weight
        tf_to_pt_map[prefix + """biases"""] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va( model , config , tf_checkpoint_path ):
    """Load a TensorFlow checkpoint into the PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
            """https://www.tensorflow.org/install/ for installation instructions.""" )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path )
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(F"""Loading TF weight {name} with shape {shape}""" )
        array = tf.train.load_variable(tf_checkpoint_path , name )
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights )
    for name, pointer in tf_to_pt_map.items():
        logger.info(F"""Importing {name}""" )
        if name not in tf_weights:
            logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("""Transposing depthwise""" )
            array = np.transpose(array , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info("""Transposing""" )
            if len(pointer.shape ) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
        logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
        pointer.data = torch.from_numpy(array )
        tf_weights.pop(name , None )
        tf_weights.pop(name + """/RMSProp""" , None )
        tf_weights.pop(name + """/RMSProp_1""" , None )
        tf_weights.pop(name + """/ExponentialMovingAverage""" , None )
    logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
    return model
def apply_tf_padding( features: torch.Tensor , conv_layer: nn.Convad ):
    """Apply TensorFlow-style "SAME" padding to the input features."""
    in_height , in_width = features.shape[-2:]
    stride_height , stride_width = conv_layer.stride
    kernel_height , kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , """constant""" , 0.0 )
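# Worked example for the "SAME" padding above (numbers are illustrative): with
# in_height = 7, stride_height = 2 and kernel_height = 3, 7 % 2 != 0, so
# pad_along_height = max(3 - (7 % 2), 0) = 2, split into pad_top = 1 and
# pad_bottom = 1; the width is padded the same way, matching TensorFlow's
# asymmetric "SAME" rule.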
class MobileNetVaConvLayer( nn.Module ):
"""simple docstring"""
    def __init__( self , config: MobileNetVaConfig , in_channels: int , out_channels: int , kernel_size: int , stride: Optional[int] = 1 , groups: Optional[int] = 1 , bias: bool = False , use_normalization: Optional[bool] = True , use_activation: Optional[bool or str] = True , ):
        """simple docstring"""
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
        if out_channels % groups != 0:
            raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        self.convolution = nn.Convad(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode="""zeros""" , )
        if use_normalization:
            self.normalization = nn.BatchNormad(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9_997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str ):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , str ):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward( self , features: torch.Tensor ) -> torch.Tensor:
        """simple docstring"""
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution )
        features = self.convolution(features )
        if self.normalization is not None:
            features = self.normalization(features )
        if self.activation is not None:
            features = self.activation(features )
        return features
class MobileNetVaPreTrainedModel( PreTrainedModel ):
    """simple docstring"""
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    def _init_weights( self , module: Union[nn.Linear, nn.Convad] ) -> None:
        """Initialize the weights."""
        if isinstance(module , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNormad ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
MOBILENET_V1_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.", MOBILENET_V1_START_DOCSTRING, )
class MobileNetVaModel( MobileNetVaPreTrainedModel ):
"""simple docstring"""
    def __init__( self , config: MobileNetVaConfig , add_pooling_layer: bool = True ):
        """simple docstring"""
        super().__init__(config )
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13 ):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )
        self.pooler = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads( self , heads_to_prune ):
        """simple docstring"""
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values: Optional[torch.Tensor] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ):
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("""You have to specify pixel_values""" )
        hidden_states = self.conv_stem(pixel_values )
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            hidden_states = layer_module(hidden_states )
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ", MOBILENET_V1_START_DOCSTRING, )
class MobileNetVaForImageClassification( MobileNetVaPreTrainedModel ):
"""simple docstring"""
    def __init__( self , config: MobileNetVaConfig ) -> None:
        """simple docstring"""
        super().__init__(config )
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config )
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True )
        self.classifier = nn.Linear(last_hidden_size , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values: Optional[torch.Tensor] = None , output_hidden_states: Optional[bool] = None , labels: Optional[torch.Tensor] = None , return_dict: Optional[bool] = None , ):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
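# Inference sketch (a minimal example, assuming an image-processor checkpoint
# matching _IMAGE_CLASS_CHECKPOINT; `image` is a PIL image supplied by the caller):
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""")
#     model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""")
#     inputs = processor(images=image, return_tensors="""pt""")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])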
| 335 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger =logging.get_logger(__name__)
PATTERNS =[
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key( k ):
    """simple docstring"""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("""encoder""" ):
        k = k.replace(""".attn""" , """.self_attn""" )
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """final_layer_norm""" )
    elif k.startswith("""decoder""" ):
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
        k = k.replace("""norm3""" , """final_layer_norm""" )
    return k
def rename_layernorm_keys( sd ):
    """simple docstring"""
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("""layernorm_embedding""" , """layer_norm""" )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS =['''START''']
@torch.no_grad()
def convert_parlai_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    """simple docstring"""
    model = torch.load(checkpoint_path , map_location="""cpu""" )
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser =argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args =parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
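# Example invocation (a sketch; the script filename and paths are placeholders):
#
#     python convert_blenderbot_checkpoint.py \
#         --src_path blenderbot-model.bin \
#         --save_dir hf_blenderbot \
#         --hf_config_json blenderbot-3b-config.json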
| 70 |
"""simple docstring"""
def depth_first_search( grid: list[list[int]] , row: int , col: int , visit: set ) -> int:
    """Count the paths from (row, col) to the bottom-right cell, avoiding cells equal to 1."""
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
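# Example (a quick sanity check): a 2x2 open grid admits exactly two paths from
# the top-left to the bottom-right corner (right-then-down and down-then-right),
# and the search counts both.
#
#     assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2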
| 335 | 0 |
def find_min( arr ) -> int:
    """Return the minimum difference between the sums of a two-subset partition of arr."""
    n =len(arr )
    s =sum(arr )
    dp =[[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 ,n + 1 ):
        dp[i][0] =True
    for i in range(1 ,s + 1 ):
        dp[0][i] =False
    for i in range(1 ,n + 1 ):
        for j in range(1 ,s + 1 ):
            dp[i][j] =dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] =dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) ,-1 ,-1 ):
        if dp[n][j] is True:
            diff =s - 2 * j
            break
    return diff
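# Example: [1, 6, 11, 5] can be split into {1, 5, 6} and {11}, with sums 12 and
# 11, so the minimum achievable difference is 1.
#
#     assert find_min([1, 6, 11, 5]) == 1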
| 71 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_megatron_bert'] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 | 0 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests( FlaxModelTesterMixin , unittest.TestCase):
    model_class = FlaxAutoencoderKL
@property
    def dummy_input( self ):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self ):
        """simple docstring"""
        init_dict = {
            '''block_out_channels''': [3_2, 6_4],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 72 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test__distribute_shards( kwargs , expected ):
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test__split_gen_kwargs( gen_kwargs , max_num_jobs , expected ):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test__number_of_shards_in_gen_kwargs( gen_kwargs , expected ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
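# Example of the distribution rule exercised above: 10 shards over at most 3
# jobs are split into contiguous ranges of sizes 4, 3 and 3.
#
#     assert _distribute_shards(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]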
| 335 | 0 |
def bin_to_octal( bin_string: str ) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in '01' for char in bin_string ):
        raise ValueError('Non-binary value was passed to the function' )
    if not bin_string:
        raise ValueError('Empty string was passed to the function' )
    oct_string : str = ''
    while len(bin_string ) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
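# Worked example: "1111" is left-padded to "001111" and grouped as 001|111,
# i.e. octal digits 1 and 7 (the padding group stays in the output):
#
#     assert bin_to_octal("1111") == "017"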
| 73 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))
def compare_versions( library_or_version: Union[str, Version] , operation: str , requirement_version: str ):
    """Compare a library version (or Version object) against a requirement using `operation`."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version( operation: str , version: str ):
    """Check the installed torch version against `version` using `operation`."""
    return compare_versions(torch_version , operation , version )
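# Usage sketch (operator strings are whatever STR_OPERATION_TO_FUNC defines in
# the constants module, e.g. ">=" or "<"):
#
#     compare_versions("""packaging""", """>=""", """20.0""")  # installed package vs. requirement
#     is_torch_version("""<""", """2.0.0""")                   # pinned torch version check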
| 335 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/reformer-crime-and-punishment''': 52_42_88,
}
class ReformerTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self ,vocab_file ,eos_token="</s>" ,unk_token="<unk>" ,additional_special_tokens=[] ,sp_model_kwargs: Optional[Dict[str, Any]] = None ,**kwargs ,) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token ,unk_token=unk_token ,additional_special_tokens=additional_special_tokens ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self ,d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self ,text: str ) -> List[str]:
        return self.sp_model.encode(text ,out_type=str )
    def _convert_token_to_id( self ,token ):
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self ,index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self ,tokens ):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 74 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE_ : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class ChrF( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute( self , predictions , references , char_order: int = CHRF.CHAR_ORDER , word_order: int = CHRF.WORD_ORDER , beta: int = CHRF.BETA , lowercase: bool = False , whitespace: bool = False , eps_smoothing: bool = False , ):
        """simple docstring"""
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 335 | 0 |
'''simple docstring'''
import math
def insertion_sort( array: list , start: int = 0 , end: int = 0 ) -> list:
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify( array: list , index: int , heap_size: int ) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )
def heap_sort( array: list ) -> list:
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[i], array[0] = array[0], array[i]
        heapify(array , 0 , i )
    return array
def median_of_a( array: list , first_index: int , middle_index: int , last_index: int ) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition( array: list , low: int , high: int , pivot: int ) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort( array: list ) -> list:
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
def intro_sort( array: list , start: int , end: int , size_threshold: int , max_depth: int ) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_a(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by a comma : """).strip()
    unsorted = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
| 75 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer( SeqaSeqTrainer ):
"""simple docstring"""
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset: Optional[Dataset] = None , eval_examples=None , ignore_keys: Optional[List[str]] = None , metric_key_prefix: str = "eval" , **gen_kwargs , ):
        """simple docstring"""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["""max_length"""] = (
            gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
        )
        gen_kwargs["""num_beams"""] = (
            gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"""{metric_key_prefix}_""" ):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix: str = "test" , **gen_kwargs ):
        """simple docstring"""
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , """predict""" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"""{metric_key_prefix}_""" ):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
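# Usage sketch (a minimal example, assuming a seq2seq model, training args,
# datasets and a `post_processing_function` mapping generations to metric
# inputs already exist in the calling script):
#
#     trainer = QuestionAnsweringSeqaSeqTrainer(
#         model=model, args=training_args, train_dataset=train_ds,
#         eval_dataset=eval_ds, eval_examples=eval_examples,
#         post_process_function=post_processing_function, compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=128, num_beams=4)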
| 335 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=2 , image_size=32 , patch_size=16 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 384, 24, 24] , is_hybrid=True , scope=None , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCamelCase ( self : Any , a : str , a : Union[str, Any] , a : List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = DPTModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : str , a : Dict , a : Dict , a : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = DPTForDepthEstimation(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(a )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCamelCase ( self : Union[str, Any] , a : Any , a : List[Any] , a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : str = DPTForSemanticSegmentation(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a , labels=a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = config_and_inputs
SCREAMING_SNAKE_CASE : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # Only DPTForDepthEstimation uses readout_type, and "add" is invalid for the hybrid model
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4)) | 76 |
"""simple docstring"""
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self) -> None:
        """Print each vertex followed by its adjacency list."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if the source vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 335 | 0 |
"""simple docstring"""
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # this tree node covers only the updated index
                self.tree[index] = value
            else:
                # recompute the maximum over the whole segment this node covers
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
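
    # Illustrative usage (values chosen here, not from the original file): a max
    # Fenwick tree over 5 slots; query(left, right) takes a half-open range.
    demo = MaxFenwickTree(5)
    demo.update(1, 7)
    demo.update(3, 4)
    print(demo.query(1, 4))  # prints 7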
| 77 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last `n` digits of 28433 * 2**7830457 + 1 (Project Euler 97)."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
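    # For reference, the printed value should be 8739992577, the known answer
    # to Project Euler problem 97.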
| 335 | 0 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model (plus tokenizer) built from a pretrained config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
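# Example invocation (hypothetical script name, model name, and output path):
#   python save_randomly_initialized_model.py --config_name t5-small --save_dir ./t5-small-random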
| 78 |
"""simple docstring"""
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Create the vector pointing from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check whether a vector is the zero vector, rounding to the given accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product of AB and AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
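

if __name__ == "__main__":
    # Illustrative check (points chosen here, not from the original file):
    # three points on the line x = y = z are collinear; an off-line point is not.
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True
    print(are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # False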
| 335 | 0 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        """Pick, per sample, the best string among the char/bpe/wp decoding heads."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char-level end-of-sequence token id
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
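

# Minimal usage sketch (illustrative; assumes the public MGP-STR checkpoint name and a
# model that returns a 3-tuple of char/bpe/wp logits, e.g. MgpstrForSceneTextRecognition):
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     text = processor.batch_decode(outputs.logits)["generated_text"]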
| 79 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
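# Note (illustrative, not part of the original file): with the lazy module installed in
# sys.modules, importing PegasusXModel from this package defers loading the heavy
# modeling code until the attribute is first accessed.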
| 335 | 0 |
'''simple docstring'''
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product of any contiguous subarray of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
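

if __name__ == "__main__":
    # Illustrative check (input chosen here, not from the original file):
    # the best contiguous product in [2, 3, -2, 4] is 2 * 3 = 6.
    print(max_product_subarray([2, 3, -2, 4]))  # 6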
| 80 |
"""simple docstring"""
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # the distance from a node to itself is 0

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))  # expected: 11 (1 -> 3 -> 4)
    print(graph.show_min(0, 3))  # expected: 16 (0 -> 2 -> 3)
| 335 | 0 |