"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = inspect.getfile(accelerate.test_utils )
A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
A = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = F'\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '.split()
A = [sys.executable] + distributed_args
execute_subprocess_async(A_ ,env=os.environ.copy() ) | 74 |
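# Illustration (not part of the original test): the command assembled above is
# equivalent to the shell invocation below, with both paths resolved at runtime
# via `inspect.getfile`; the concrete paths shown here are assumptions.
#
#   python /path/to/tests/xla_spawn.py --num_cores 8 \
#       /path/to/accelerate/test_utils/scripts/test_script.py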
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    """Read the vocabulary: one token per line, stripped of surrounding whitespace."""
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
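# Usage sketch (hypothetical, not part of the original file). The tokenizer
# registers every vocabulary entry as a no-split token, so single-letter
# residues are recognized directly; the toy vocabulary below stands in for the
# real 33-token ESM-2 vocab, purely for illustration:
#
#   with open("toy_vocab.txt", "w") as f:
#       f.write("\n".join(["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "<mask>"]))
#   tok = EsmTokenizer(vocab_file="toy_vocab.txt")
#   print(tok("LAG")["input_ids"])  # -> [0, 4, 5, 6, 2], i.e. <cls> L A G <eos>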
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by performing a forward pass with `x` as input.
        Under the hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
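# A toy end-to-end use of the two helpers above (a sketch, not part of the
# original script). Both modules trace to the same two parametrized leaves
# (Conv2d, BatchNorm2d), so the transfer succeeds leaf-by-leaf:
#
#   src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#   dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#   ModuleTransfer(src=src, dest=dest, verbose=1)(torch.randn(1, 3, 32, 32))
#   assert torch.equal(src[0].weight, dest[0].weight)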
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import numpy as np
import datasets
A : Dict = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
A : Dict = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
A : Optional[int] = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # (`worflow_run_id` is the parameter name as defined in `get_ci_error_statistics`)
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and decode the artifact contents of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
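# Usage sketch (assumptions: a GitHub token exported as GH_TOKEN, the helper
# module `get_ci_error_statistics` importable, and an illustrative artifact name):
#
#   reports = get_last_daily_ci_reports(
#       artifact_names=["ci_results"], output_dir="ci_artifacts", token=os.environ["GH_TOKEN"]
#   )
#   # reports: {artifact_name: {filename_inside_zip: decoded_text, ...}, ...}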
"""Convert Swin SimMIM checkpoints from the original repository to the Hugging Face format."""

import argparse

import requests
import torch
from PIL import Image

from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor


def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config


def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
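# For example (a sketch of the mapping above, not in the original script):
#
#   rename_key("encoder.patch_embed.proj.weight")
#   # -> "swin.embeddings.patch_embeddings.projection.weight"
#   rename_key("decoder.0.weight")
#   # -> "decoder.0.weight"  (decoder keys keep their name and get no "swin." prefix)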
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # The fused qkv projection is split into separate query/key/value
            # tensors. The target key names below follow the standard HF Swin
            # layout (reconstructed; the original assignment targets were lost).
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
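# Example invocation (a sketch; it assumes this file is saved as
# convert_swin_simmim_to_pytorch.py and that the checkpoint path exists):
#
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-base-simmim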
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
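# The integration test above, as a standalone sketch (downloads the checkpoint,
# so it needs network access; the token ids are the ones used in the test):
#
#   model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
#   input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
#   logits = model(input_ids)[0]  # shape (1, 3): the three MNLI classes
#   print(logits)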
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
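# A minimal sketch of the decorator in isolation (toy function; the numbers are
# illustrative). On a CUDA out-of-memory error the decorator frees memory,
# halves the batch size, and retries:
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       print(f"trying batch_size={batch_size}")
#       ...  # an OOM raised in here triggers a retry at batch_size // 2
#
#   train()  # called with no arguments; the decorator injects batch_size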
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extraction specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
def _lowerCAmelCase ( self : Optional[int] ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : Tuple ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : List[str] ):
self._check_truncation(numpify=snake_case )
def _lowerCAmelCase ( self : int ):
self._check_truncation(numpify=snake_case )
@require_torch
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
@require_tf
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='tf' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1e-2 )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =min(snake_case )
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,truncation=snake_case ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
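    # Illustrative sketch (not part of the original suite): the same pad/truncation
    # semantics from user code, assuming Wav2Vec2FeatureExtractor as a concrete
    # SequenceFeatureExtractor; the tests above run against whatever
    # `self.feature_extraction_class` is, so treat the class and numbers as assumptions.
    #
    #   import numpy as np
    #   from transformers import Wav2Vec2FeatureExtractor
    #   from transformers.feature_extraction_utils import BatchFeature
    #
    #   feat_extract = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    #   speech_inputs = [np.random.rand(n).astype(np.float32) for n in (800, 1000, 1200)]
    #   batch = BatchFeature({"input_values": speech_inputs})
    #   # truncation=True with padding="max_length" cuts longer inputs down to max_length,
    #   # so the output is rectangular and can be returned as a numpy array:
    #   out = feat_extract.pad(batch, padding="max_length", max_length=800, truncation=True, return_tensors="np")
    #   assert out["input_values"].shape == (3, 800)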
| 334 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE = "ViTImageProcessor"
SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ) -> List[Any]:
lowercase__ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCAmelCase , )
lowercase__ : Dict = kwargs.pop('''feature_extractor''' )
lowercase__ : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ) -> Tuple:
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
lowercase__ : Dict = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if visual_prompt is not None:
lowercase__ : str = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if images is not None:
lowercase__ : Any = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if visual_prompt is not None and images is not None:
lowercase__ : List[Any] = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowercase__ : int = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowercase__ : int = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def _lowerCAmelCase( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCAmelCase( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> int:
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def _lowerCAmelCase( self ) -> Dict:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , )
return self.image_processor_class
@property
def _lowerCAmelCase( self ) -> Union[str, Any]:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __lowerCAmelCase , )
return self.image_processor
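# Usage sketch (illustrative): this processor pairs a ViT image processor with a CLIP
# tokenizer and emits `conditional_pixel_values` for visual prompts, which matches the
# CLIPSeg processor in transformers; the class and checkpoint names below are assumptions.
#
#   from PIL import Image
#   from transformers import CLIPSegProcessor
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   image = Image.open("scene.jpg")
#   # Text prompts yield input_ids + pixel_values; a visual prompt instead yields
#   # conditional_pixel_values alongside pixel_values, as assembled in __call__ above.
#   inputs = processor(text=["a cat", "a dog"], images=image, return_tensors="pt")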
| 198 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item, main_target):
    """Evaluate how similar the item is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1, parent_2):
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child, genes):
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1, population_score, genes):
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target, genes, debug=True):
    """Run the evolution until the target string is reproduced."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
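    # Quick deterministic sanity check (illustrative, not from the original script):
    # a tiny target over a small gene pool converges in a handful of generations, and
    # basic() only returns once the best string equals the target.
    #
    #   random.seed(0)
    #   gen, total, best = basic("hello", list("helo"), debug=False)
    #   assert best == "hello"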
| 334 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (lowerCamelCase_ ):
'''simple docstring'''
_snake_case : Optional[Any] = '''upernet'''
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=5_1_2 , _UpperCamelCase=0.02 , _UpperCamelCase=[1, 2, 3, 6] , _UpperCamelCase=True , _UpperCamelCase=0.4 , _UpperCamelCase=3_8_4 , _UpperCamelCase=2_5_6 , _UpperCamelCase=1 , _UpperCamelCase=False , _UpperCamelCase=2_5_5 , **_UpperCamelCase , ) -> Optional[Any]:
super().__init__(**_UpperCamelCase )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
UpperCAmelCase_ : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = backbone_config.get('model_type' )
UpperCAmelCase_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Optional[int] = config_class.from_dict(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = backbone_config
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : List[Any] = pool_scales
UpperCAmelCase_ : Tuple = use_auxiliary_head
UpperCAmelCase_ : Tuple = auxiliary_loss_weight
UpperCAmelCase_ : int = auxiliary_in_channels
UpperCAmelCase_ : str = auxiliary_channels
UpperCAmelCase_ : str = auxiliary_num_convs
UpperCAmelCase_ : Union[str, Any] = auxiliary_concat_input
UpperCAmelCase_ : Any = loss_ignore_index
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : List[str] = self.backbone_config.to_dict()
UpperCAmelCase_ : Optional[Any] = self.__class__.model_type
return output
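# Minimal usage sketch (illustrative): with no backbone_config the __init__ above falls
# back to a default ResNet backbone, and the custom to_dict() serializes it back in.
# Assuming this is transformers' UperNet config, the real class name is UperNetConfig.
#
#   config = UperNetConfig()                 # default ResNet backbone, per __init__
#   config_dict = config.to_dict()
#   assert config_dict["model_type"] == "upernet"
#   assert "backbone_config" in config_dict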
| 29 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE =tempfile.mktemp()
with open(snake_case ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,snake_case )
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained(snake_case )
finally:
os.remove(snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,snake_case )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _lowerCAmelCase ( self : int ):
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _lowerCAmelCase ( cls : List[Any] ):
SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _lowerCAmelCase ( cls : Tuple ):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _lowerCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case ,repo_id='test-tokenizer' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def _lowerCAmelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def _lowerCAmelCase ( self : str ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =CustomTokenizer(snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained(snake_case )
bert_tokenizer.save_pretrained(snake_case )
SCREAMING_SNAKE_CASE =CustomTokenizerFast.from_pretrained(snake_case )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=snake_case ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] )
def _lowerCAmelCase ( self : Optional[Any] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
SCREAMING_SNAKE_CASE =Trie()
SCREAMING_SNAKE_CASE =trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(snake_case ,['AB', 'C'] )
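    # Illustrative extra case (not part of the original suite): tokens match greedily
    # left to right, so a repeated single-character token splits around the text between.
    #
    #   trie = Trie()
    #   trie.add("A")
    #   assert trie.split("ABA") == ["A", "B", "A"]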
| 334 | 0 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCAmelCase : Any = logging.get_logger(__name__)
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[Any]:
return [
int(1000 * (box[0] / width)),
int(1000 * (box[1] / height)),
int(1000 * (box[2] / width)),
int(1000 * (box[3] / height)),
]
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[Any]:
__snake_case: Any = to_pil_image(lowerCAmelCase_)
__snake_case , __snake_case: Any = pil_image.size
__snake_case: Tuple = pytesseract.image_to_data(lowerCAmelCase_ , lang=lowerCAmelCase_ , output_type="""dict""" , config=lowerCAmelCase_)
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case: List[Any] = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
__snake_case: str = [idx for idx, word in enumerate(lowerCAmelCase_) if not word.strip()]
__snake_case: Optional[int] = [word for idx, word in enumerate(lowerCAmelCase_) if idx not in irrelevant_indices]
__snake_case: Optional[int] = [coord for idx, coord in enumerate(lowerCAmelCase_) if idx not in irrelevant_indices]
__snake_case: List[Any] = [coord for idx, coord in enumerate(lowerCAmelCase_) if idx not in irrelevant_indices]
__snake_case: Tuple = [coord for idx, coord in enumerate(lowerCAmelCase_) if idx not in irrelevant_indices]
__snake_case: Optional[int] = [coord for idx, coord in enumerate(lowerCAmelCase_) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__snake_case: List[str] = []
for x, y, w, h in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
__snake_case: str = [x, y, x + w, y + h]
actual_boxes.append(lowerCAmelCase_)
# finally, normalize the bounding boxes
__snake_case: Dict = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_))
assert len(lowerCAmelCase_) == len(lowerCAmelCase_), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __snake_case ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""pixel_values"""]
def __init__( self : int , A : bool = True , A : Dict[str, int] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : float = 1 / 255 , A : bool = True , A : Union[float, Iterable[float]] = None , A : Union[float, Iterable[float]] = None , A : bool = True , A : Optional[str] = None , A : Optional[str] = "" , **A : Any , ):
super().__init__(**A )
__snake_case: Any = size if size is not None else {"""height""": 224, """width""": 224}
__snake_case: Optional[Any] = get_size_dict(A )
__snake_case: str = do_resize
__snake_case: Optional[int] = size
__snake_case: Optional[Any] = resample
__snake_case: List[str] = do_rescale
__snake_case: List[str] = rescale_value
__snake_case: Dict = do_normalize
__snake_case: Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case: Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
__snake_case: List[Any] = apply_ocr
__snake_case: Optional[int] = ocr_lang
__snake_case: int = tesseract_config
def UpperCAmelCase__ ( self : Optional[int] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BILINEAR , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ):
__snake_case: Union[str, Any] = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
__snake_case: Dict = (size["""height"""], size["""width"""])
return resize(A , size=A , resample=A , data_format=A , **A )
def UpperCAmelCase__ ( self : Tuple , A : np.ndarray , A : Union[int, float] , A : Optional[Union[str, ChannelDimension]] = None , **A : Optional[int] , ):
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase__ ( self : Any , A : np.ndarray , A : Union[float, Iterable[float]] , A : Union[float, Iterable[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ):
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase__ ( self : Optional[int] , A : ImageInput , A : bool = None , A : Dict[str, int] = None , A : str=None , A : bool = None , A : float = None , A : bool = None , A : Union[float, Iterable[float]] = None , A : Union[float, Iterable[float]] = None , A : bool = None , A : Optional[str] = None , A : Optional[str] = None , A : Optional[Union[str, TensorType]] = None , A : ChannelDimension = ChannelDimension.FIRST , **A : Optional[Any] , ):
__snake_case: List[Any] = do_resize if do_resize is not None else self.do_resize
__snake_case: Dict = size if size is not None else self.size
__snake_case: List[str] = get_size_dict(A )
__snake_case: List[Any] = resample if resample is not None else self.resample
__snake_case: Dict = do_rescale if do_rescale is not None else self.do_rescale
__snake_case: str = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case: Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
__snake_case: List[str] = image_mean if image_mean is not None else self.image_mean
__snake_case: List[str] = image_std if image_std is not None else self.image_std
__snake_case: Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
__snake_case: str = ocr_lang if ocr_lang is not None else self.ocr_lang
__snake_case: int = tesseract_config if tesseract_config is not None else self.tesseract_config
__snake_case: Tuple = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""" )
# All transformations expect numpy arrays.
__snake_case: str = [to_numpy_array(A ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , """pytesseract""" )
__snake_case: Union[str, Any] = []
__snake_case: Dict = []
for image in images:
__snake_case , __snake_case: Tuple = apply_tesseract(A , A , A )
words_batch.append(A )
boxes_batch.append(A )
if do_resize:
__snake_case: int = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_rescale:
__snake_case: Optional[int] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
__snake_case: Optional[Any] = [self.normalize(image=A , mean=A , std=A ) for image in images]
__snake_case: str = [to_channel_dimension_format(A , A ) for image in images]
__snake_case: Optional[int] = BatchFeature(data={"""pixel_values""": images} , tensor_type=A )
if apply_ocr:
__snake_case: str = words_batch
__snake_case: Tuple = boxes_batch
return data
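# Usage sketch (illustrative): this is the LayoutLM-family image processor with built-in
# Tesseract OCR; the class and checkpoint names below are assumptions, and pytesseract
# plus the Tesseract binary must be installed for apply_ocr=True (the default above).
#
#   from PIL import Image
#   from transformers import LayoutLMv2ImageProcessor
#
#   processor = LayoutLMv2ImageProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
#   encoding = processor(Image.open("invoice.png"), return_tensors="pt")
#   # apply_ocr=True also returns the OCR words and their normalized bounding boxes:
#   words, boxes = encoding.words, encoding.boxes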
| 111 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase =[
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
_lowerCamelCase =[
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =torch.load(lowerCAmelCase_, map_location='cpu' )
return sd
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_=rename_keys_prefix ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =OrderedDict()
SCREAMING_SNAKE_CASE =torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE =key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE =new_key.replace(name_pair[0], name_pair[1] )
SCREAMING_SNAKE_CASE =d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
SCREAMING_SNAKE_CASE =new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE ='pretraining'
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 512}
SCREAMING_SNAKE_CASE ='multichoice'
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048}
SCREAMING_SNAKE_CASE ='vqa_advanced'
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048, 'num_labels': 3129}
SCREAMING_SNAKE_CASE ='vqa'
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={
'visual_embedding_dim': 1024,
'num_labels': 2,
}
SCREAMING_SNAKE_CASE ='nlvr'
SCREAMING_SNAKE_CASE =VisualBertConfig(**lowerCAmelCase_ )
# Load State Dict
SCREAMING_SNAKE_CASE =load_state_dict(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =get_new_dict(lowerCAmelCase_, lowerCAmelCase_ )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE =VisualBertForPreTraining(lowerCAmelCase_ )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE =VisualBertForQuestionAnswering(lowerCAmelCase_ )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE =VisualBertForVisualReasoning(lowerCAmelCase_ )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE =VisualBertForMultipleChoice(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
# Save Checkpoints
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
_lowerCamelCase =parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
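# Example invocation (illustrative; the script filename and paths are placeholders, and
# the checkpoint basename must be one of ACCEPTABLE_CHECKPOINTS above):
#
#   python convert_visual_bert_checkpoint.py \
#       /checkpoints/nlvr2_fine_tuned.th ./visual_bert_nlvr2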
| 334 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_lowerCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_( lowerCamelCase_ ):
'''simple docstring'''
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any:
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> int:
if len(__UpperCAmelCase ) == 0 or len(__UpperCAmelCase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(__UpperCAmelCase ) )
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = [sequences]
lowerCAmelCase__ : Optional[Any] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__UpperCAmelCase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(lowerCamelCase_ )
class lowerCAmelCase_( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self ,__UpperCAmelCase=ZeroShotClassificationArgumentHandler() ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : int = args_parser
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=TruncationStrategy.ONLY_FIRST ,**__UpperCAmelCase ) -> Any:
lowerCAmelCase__ : List[Any] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
lowerCAmelCase__ : Optional[Any] = self.tokenizer.eos_token
try:
lowerCAmelCase__ : Dict = self.tokenizer(
__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors=__UpperCAmelCase ,padding=__UpperCAmelCase ,truncation=__UpperCAmelCase ,)
except Exception as e:
if "too short" in str(__UpperCAmelCase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
lowerCAmelCase__ : str = self.tokenizer(
__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors=__UpperCAmelCase ,padding=__UpperCAmelCase ,truncation=TruncationStrategy.DO_NOT_TRUNCATE ,)
else:
raise e
return inputs
def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> str:
if kwargs.get("""multi_class""" ,__UpperCAmelCase ) is not None:
lowerCAmelCase__ : Tuple = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
lowerCAmelCase__ : str = {}
if "candidate_labels" in kwargs:
lowerCAmelCase__ : Union[str, Any] = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
lowerCAmelCase__ : Union[str, Any] = kwargs["""hypothesis_template"""]
lowerCAmelCase__ : Dict = {}
if "multi_label" in kwargs:
lowerCAmelCase__ : List[str] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self ,__UpperCAmelCase ,*__UpperCAmelCase ,**__UpperCAmelCase ,) -> List[Any]:
if len(__UpperCAmelCase ) == 0:
pass
elif len(__UpperCAmelCase ) == 1 and "candidate_labels" not in kwargs:
lowerCAmelCase__ : Union[str, Any] = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(__UpperCAmelCase ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase="This example is {}." ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self._args_parser(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
for i, (candidate_label, sequence_pair) in enumerate(zip(__UpperCAmelCase ,__UpperCAmelCase ) ):
lowerCAmelCase__ : Optional[int] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__UpperCAmelCase ) - 1,
**model_input,
}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : Optional[Any] = inputs["""candidate_label"""]
lowerCAmelCase__ : Union[str, Any] = inputs["""sequence"""]
lowerCAmelCase__ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
lowerCAmelCase__ : Dict = self.model(**__UpperCAmelCase )
lowerCAmelCase__ : int = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> int:
lowerCAmelCase__ : List[str] = [outputs["""candidate_label"""] for outputs in model_outputs]
lowerCAmelCase__ : str = [outputs["""sequence"""] for outputs in model_outputs]
lowerCAmelCase__ : List[Any] = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
lowerCAmelCase__ : List[str] = logits.shape[0]
lowerCAmelCase__ : List[Any] = len(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = N // n
lowerCAmelCase__ : Any = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__UpperCAmelCase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
lowerCAmelCase__ : Tuple = self.entailment_id
lowerCAmelCase__ : Union[str, Any] = -1 if entailment_id == 0 else 0
lowerCAmelCase__ : Dict = reshaped_outputs[..., [contradiction_id, entailment_id]]
lowerCAmelCase__ : Union[str, Any] = np.exp(__UpperCAmelCase ) / np.exp(__UpperCAmelCase ).sum(-1 ,keepdims=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
lowerCAmelCase__ : Any = reshaped_outputs[..., self.entailment_id]
lowerCAmelCase__ : Optional[Any] = np.exp(__UpperCAmelCase ) / np.exp(__UpperCAmelCase ).sum(-1 ,keepdims=__UpperCAmelCase )
lowerCAmelCase__ : str = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
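# Usage sketch (illustrative): this is the zero-shot classification pipeline; the task
# string is standard, and the NLI checkpoint name is an assumption.
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   result = classifier(
#       "one day I will see the world",
#       candidate_labels=["travel", "cooking", "dancing"],
#       multi_label=False,
#   )
#   # result["labels"] comes back sorted by result["scores"], per postprocess() above.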
| 37 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'nllb-moe'
__UpperCAmelCase = ['past_key_values']
__UpperCAmelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : str ,snake_case : Optional[int]=128112 ,snake_case : Any=1024 ,snake_case : List[str]=12 ,snake_case : Optional[int]=4096 ,snake_case : List[str]=16 ,snake_case : Optional[Any]=12 ,snake_case : Optional[Any]=4096 ,snake_case : List[Any]=16 ,snake_case : Optional[Any]=0.05 ,snake_case : str=0.05 ,snake_case : Optional[int]=True ,snake_case : Tuple=True ,snake_case : Optional[Any]="relu" ,snake_case : Any=1024 ,snake_case : List[Any]=0.1 ,snake_case : List[Any]=0.1 ,snake_case : Optional[Any]=0.0 ,snake_case : List[Any]=0.02 ,snake_case : Any=2 ,snake_case : Dict=True ,snake_case : Tuple=False ,snake_case : Any="float32" ,snake_case : Tuple=False ,snake_case : List[Any]=128 ,snake_case : Tuple=64 ,snake_case : List[Any]=4 ,snake_case : List[Any]=4 ,snake_case : List[Any]=0.001 ,snake_case : int=0.001 ,snake_case : Tuple="all" ,snake_case : Union[str, Any]=False ,snake_case : Union[str, Any]=False ,snake_case : Optional[int]=1.0 ,snake_case : Optional[Any]=0.2 ,snake_case : Optional[int]=1 ,snake_case : Union[str, Any]=0 ,snake_case : Tuple=2 ,snake_case : List[Any]=False ,**snake_case : List[Any] ,):
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =d_model
SCREAMING_SNAKE_CASE =encoder_ffn_dim
SCREAMING_SNAKE_CASE =encoder_layers
SCREAMING_SNAKE_CASE =encoder_attention_heads
SCREAMING_SNAKE_CASE =decoder_ffn_dim
SCREAMING_SNAKE_CASE =decoder_layers
SCREAMING_SNAKE_CASE =decoder_attention_heads
SCREAMING_SNAKE_CASE =dropout
SCREAMING_SNAKE_CASE =attention_dropout
SCREAMING_SNAKE_CASE =activation_dropout
SCREAMING_SNAKE_CASE =activation_function
SCREAMING_SNAKE_CASE =init_std
SCREAMING_SNAKE_CASE =encoder_layerdrop
SCREAMING_SNAKE_CASE =decoder_layerdrop
SCREAMING_SNAKE_CASE =use_cache
SCREAMING_SNAKE_CASE =encoder_layers
SCREAMING_SNAKE_CASE =scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE =router_z_loss_coef
SCREAMING_SNAKE_CASE =router_aux_loss_coef
SCREAMING_SNAKE_CASE =decoder_sparse_step
SCREAMING_SNAKE_CASE =encoder_sparse_step
SCREAMING_SNAKE_CASE =num_experts
SCREAMING_SNAKE_CASE =expert_capacity
SCREAMING_SNAKE_CASE =router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
SCREAMING_SNAKE_CASE =router_dtype
SCREAMING_SNAKE_CASE =router_ignore_padding_tokens
SCREAMING_SNAKE_CASE =batch_prioritized_routing
SCREAMING_SNAKE_CASE =second_expert_policy
SCREAMING_SNAKE_CASE =normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE =moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE =moe_token_dropout
SCREAMING_SNAKE_CASE =output_router_logits
super().__init__(
pad_token_id=snake_case ,bos_token_id=snake_case ,eos_token_id=snake_case ,is_encoder_decoder=snake_case ,decoder_start_token_id=snake_case ,**snake_case ,)
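# Minimal sketch (illustrative): router_dtype is validated eagerly in __init__, so an
# unsupported dtype fails at construction time. NllbMoeConfig is the assumed real name.
#
#   config = NllbMoeConfig()                     # defaults as listed in __init__ above
#   assert config.model_type == "nllb-moe"
#   # NllbMoeConfig(router_dtype="float64")      # raises ValueError, per the check above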
| 334 | 0 |
def or_gate(input_1, input_2):
    """Calculate OR of the input values: returns 1 if at least one input is 1."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate():
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
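# Truth table summarizing the gate (illustrative):
#
#   assert [or_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 1]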
| 284 |
from __future__ import annotations
def median_of_two_arrays(nums_a, nums_b):
    """Find the median of the merged, sorted contents of two arrays."""
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
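# Worked examples (illustrative): [1, 3] and [2] merge to [1, 2, 3], an odd length, so
# the median is the middle element; an even merged length averages the two middle ones.
#
#   assert median_of_two_arrays([1, 3], [2]) == 2
#   assert median_of_two_arrays([1, 2], [3, 4]) == 2.5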
| 334 | 0 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowerCamelCase_ , unittest.TestCase ):
__lowerCAmelCase : Any = GPTSanJapaneseTokenizer
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : Optional[Any] = {"""do_clean_text""": False, """add_prefix_space""": False}
def __lowerCamelCase ( self :List[str] ):
super().setUp()
# fmt: off
snake_case__ : Tuple = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
snake_case__ : str = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
snake_case__ : str = {'''unk_token''': '''<unk>'''}
snake_case__ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file ,'''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def __lowerCamelCase ( self :Dict ,**__lowercase :List[str] ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname ,**__lowercase )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[str] ):
snake_case__ : Any = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
snake_case__ : Any = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def __lowerCamelCase ( self :List[str] ,__lowercase :Tuple ):
snake_case__ , snake_case__ : Dict = self.get_input_output_texts(__lowercase )
snake_case__ : Union[str, Any] = tokenizer.encode(__lowercase ,add_special_tokens=__lowercase )
snake_case__ : int = tokenizer.decode(__lowercase ,clean_up_tokenization_spaces=__lowercase )
return text, ids
def __lowerCamelCase ( self :List[Any] ):
pass # TODO add if relevant
def __lowerCamelCase ( self :str ):
pass # TODO add if relevant
def __lowerCamelCase ( self :Any ):
pass # TODO add if relevant
def __lowerCamelCase ( self :List[str] ):
snake_case__ : str = self.get_tokenizer()
# Testing tokenization
snake_case__ : List[str] = '''こんにちは、世界。 こんばんは、㔺界。'''
snake_case__ : List[Any] = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
snake_case__ : List[Any] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
# Testing conversion to ids without special tokens
snake_case__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
snake_case__ : str = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
# Testing conversion to ids with special tokens
snake_case__ : Dict = tokens + [tokenizer.unk_token]
snake_case__ : List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
snake_case__ : str = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
def __lowerCamelCase ( self :int ):
snake_case__ : List[str] = self.get_tokenizer()
# Testing tokenization
snake_case__ : List[str] = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
snake_case__ : List[Any] = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
snake_case__ : int = tokenizer.encode(__lowercase )
snake_case__ : List[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase ,__lowercase )
@slow
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
snake_case__ : List[str] = '''こんにちは、世界。'''
snake_case__ : str = '''こんばんは、㔺界。😀'''
snake_case__ : Any = '''こんにちは、世界。こんばんは、世界。😀'''
snake_case__ : List[str] = tokenizer.encode(prefix_text + input_text )
snake_case__ : Optional[int] = tokenizer.encode('''''' ,prefix_text=prefix_text + input_text )
snake_case__ : str = tokenizer.encode(__lowercase ,prefix_text=__lowercase )
snake_case__ : Optional[int] = tokenizer.decode(__lowercase )
snake_case__ : Any = tokenizer.decode(__lowercase )
snake_case__ : Tuple = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase ,__lowercase )
self.assertEqual(__lowercase ,__lowercase )
self.assertEqual(__lowercase ,__lowercase )
@slow
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Tuple = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
snake_case__ : Any = '''こんにちは、世界。'''
snake_case__ : Tuple = '''こんばんは、㔺界。😀'''
snake_case__ : Dict = len(tokenizer.encode(__lowercase ) ) - 2
snake_case__ : List[Any] = len(tokenizer.encode(__lowercase ) ) - 2
snake_case__ : Optional[int] = [1] + [0] * (len_prefix + len_text + 1)
snake_case__ : Optional[int] = [1] * (len_prefix + len_text + 1) + [0]
snake_case__ : List[str] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
snake_case__ : List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
snake_case__ : str = tokenizer('''''' ,prefix_text=prefix_text + input_text ).token_type_ids
snake_case__ : str = tokenizer(__lowercase ,prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase ,__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
@slow
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
snake_case__ : Tuple = tokenizer.encode('''あンいワ''' )
snake_case__ : str = tokenizer.encode('''''' ,prefix_text='''あンいワ''' )
snake_case__ : List[str] = tokenizer.encode('''いワ''' ,prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(__lowercase ) ,tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) ,tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase ,__lowercase )
self.assertNotEqual(__lowercase ,__lowercase )
self.assertEqual(x_token_a[1] ,x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] ,x_token_a[3] ) # SEG token
@slow
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Tuple = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
snake_case__ : List[Any] = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
snake_case__ : List[Any] = tokenizer(__lowercase ,padding=__lowercase )
snake_case__ : Tuple = tokenizer.batch_encode_plus(__lowercase ,padding=__lowercase )
# fmt: off
snake_case__ : List[Any] = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
snake_case__ : Union[str, Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
snake_case__ : Optional[int] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids ,__lowercase )
self.assertListEqual(x_token.token_type_ids ,__lowercase )
self.assertListEqual(x_token.attention_mask ,__lowercase )
self.assertListEqual(x_token_a.input_ids ,__lowercase )
self.assertListEqual(x_token_a.token_type_ids ,__lowercase )
self.assertListEqual(x_token_a.attention_mask ,__lowercase )
def __lowerCamelCase ( self :List[Any] ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCamelCase ( self :Tuple ):
# tokenizer has no padding token
pass
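    # Illustrative prefix-text semantics (mirrors the slow tests above; the checkpoint
    # name comes from those tests, the rest is a sketch):
    #
    #   tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
    #   # Encoding with prefix_text inserts a SEG token between prefix and input, and
    #   # token_type_ids mark the prefix span, as asserted above.
    #   ids = tokenizer.encode("こんにちは、世界。", prefix_text="こんばんは、㔺界。😀")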
| 230 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'transfo-xl'
__UpperCAmelCase = ['mems']
__UpperCAmelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self , vocab_size=267735 , cutoffs=[20000, 40000, 200000] , d_model=1024 , d_embed=1024 , n_head=16 , d_head=64 , d_inner=4096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1600 , clamp_len=1000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings ( self ):
        # Message copied from Transformer-XL documentation
        logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings ( self , value ):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
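# Minimal usage sketch (added for illustration): instantiating the config above
# with its defaults and reading an attribute remapped through `attribute_map`.
if __name__ == "__main__":
    config = TransfoXLConfig()
    print(config.hidden_size )  # resolved to config.d_model via attribute_map
    print(config.max_position_embeddings )  # always -1: no sequence length limit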
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest ( TestCase ):
    '''simple docstring'''
    def test_no_type ( self ):
        arr = pa.array(TypedSequence([1, 2, 3] ) )
        self.assertEqual(arr.type ,pa.int64() )
    def test_array_type_forbidden ( self ):
        with self.assertRaises(ValueError ):
            arr = pa.array(TypedSequence([1, 2, 3] ) ,type=pa.int64() )
    def test_try_type_and_type_forbidden ( self ):
        with self.assertRaises(ValueError ):
            arr = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
    def test_compatible_type ( self ):
        arr = pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
        self.assertEqual(arr.type ,pa.int32() )
    def test_incompatible_type ( self ):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            arr = pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
    def test_try_compatible_type ( self ):
        arr = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
        self.assertEqual(arr.type ,pa.int32() )
    def test_try_incompatible_type ( self ):
        arr = pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
        self.assertEqual(arr.type ,pa.string() )
    def test_compatible_extension_type ( self ):
        arr = pa.array(TypedSequence([[[1, 2, 3]]] ,type=Array2D((1, 3) ,'int64' ) ) )
        self.assertEqual(arr.type ,Array2DExtensionType((1, 3) ,'int64' ) )
    def test_incompatible_extension_type ( self ):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            arr = pa.array(TypedSequence(['foo', 'bar'] ,type=Array2D((1, 3) ,'int64' ) ) )
    def test_try_compatible_extension_type ( self ):
        arr = pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=Array2D((1, 3) ,'int64' ) ) )
        self.assertEqual(arr.type ,Array2DExtensionType((1, 3) ,'int64' ) )
    def test_try_incompatible_extension_type ( self ):
        arr = pa.array(TypedSequence(['foo', 'bar'] ,try_type=Array2D((1, 3) ,'int64' ) ) )
        self.assertEqual(arr.type ,pa.string() )
    @require_pil
    def test_image_type_disables_optimize_list_casting ( self ):
        import PIL.Image
        pil_image = PIL.Image.fromarray(np.arange(10 ,dtype=np.uint8 ).reshape(2 ,5 ) )
        with patch(
            'datasets.arrow_writer.cast_to_python_objects' ,side_effect=cast_to_python_objects ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] ,type=Image() ) )
        args , kwargs = mock_cast_to_python_objects.call_args_list[-1]
        self.assertIn('optimize_list_casting' ,kwargs )
        self.assertFalse(kwargs['optimize_list_casting'] )
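# Quick demonstration (illustrative, not from the original tests) of the
# behavior exercised above: pa.array() infers int64 by default, `try_type`
# falls back gracefully on mismatch, while `type` is enforced strictly.
if __name__ == "__main__":
    print(pa.array(TypedSequence([1, 2, 3] ) ).type )  # int64
    print(pa.array(TypedSequence(['a', 'b'] ,try_type=Value('int64' ) ) ).type )  # string fallback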
def _check_output ( output , expected_num_chunks: int ):
    stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
    'fields' , [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}] )
def test_write ( fields , writer_batch_size ):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1} )
        writer.write({'col_1': 'bar', 'col_2': 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features ():
    output = pa.BufferOutputStream()
    features = Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
    with ArrowWriter(stream=output , features=features ) as writer:
        writer.write({'labels': 0} )
        writer.write({'labels': 1} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue() )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema )
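# Sketch (illustration): the Features passed to ArrowWriter are serialized into
# the schema metadata, so any reader can recover them with
# Features.from_arrow_schema, which is exactly what the assertions above verify.
if __name__ == "__main__":
    feats = Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
    print(Features.from_arrow_schema(feats.arrow_schema ) == feats )  # True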
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
def test_key_datatype ( writer_batch_size ):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt='split_name' , check_duplicates=True , ) as writer:
        with pytest.raises(InvalidKeyError ):
            writer.write({'col_1': 'foo', 'col_2': 1} , key=[1, 2] )
        num_examples , num_bytes = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 10] )
def test_duplicate_keys ( writer_batch_size ):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt='split_name' , check_duplicates=True , ) as writer:
        with pytest.raises(DuplicatedKeysError ):
            writer.write({'col_1': 'foo', 'col_2': 1} , key=10 )
            writer.write({'col_1': 'bar', 'col_2': 2} , key=10 )
        num_examples , num_bytes = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 10] )
def test_write_with_keys ( writer_batch_size ):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt='split_name' , check_duplicates=True , ) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1} , key=1 )
        writer.write({'col_1': 'bar', 'col_2': 2} , key=2 )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
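# Sketch (illustration): with check_duplicates=True each write must carry a
# hashable key, and re-using a key within one split raises DuplicatedKeysError
# only when the offending batch is flushed, which is why the tests above
# parametrize writer_batch_size.
if __name__ == "__main__":
    buf = pa.BufferOutputStream()
    with ArrowWriter(stream=buf , hash_salt='demo' , check_duplicates=True ) as demo_writer:
        demo_writer.write({'col_1': 'foo', 'col_2': 1} , key=1 )
        demo_writer.write({'col_1': 'bar', 'col_2': 2} , key=2 )
        print(demo_writer.finalize() )  # (num_examples, num_bytes)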
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
    'fields' , [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}] )
def test_write_batch ( fields , writer_batch_size ):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
        writer.write_batch({'col_1': [], 'col_2': []} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
    'fields' , [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}] )
def test_write_table ( fields , writer_batch_size ):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
    'fields' , [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}] )
def test_write_row ( fields , writer_batch_size ):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
        writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file ():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
        output = os.path.join(tmp_dir , 'test.arrow' )
        with ArrowWriter(path=output , schema=pa.schema(fields ) ) as writer:
            writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
            num_examples , num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
        _check_output(output , 1 )
def get_base_dtype ( arr_type ):
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list ( lst , value ):
    if isinstance(lst[0] , list ):
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
@pytest.mark.parametrize('optimized_int_type, expected_dtype' , [(None, pa.int64()), (Value('int32' ), pa.int32())] )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_int_type_for_typed_sequence ( sequence , optimized_int_type , expected_dtype ):
    arr = pa.array(TypedSequence(sequence , optimized_int_type=optimized_int_type ) )
    assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    'col, expected_dtype' , [
        ('attention_mask', pa.int8()),
        ('special_tokens_mask', pa.int8()),
        ('token_type_ids', pa.int8()),
        ('input_ids', pa.int32()),
        ('other', pa.int64()),
    ] , )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_typed_sequence ( sequence , col , expected_dtype ):
    arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
    assert get_base_dtype(arr.type ) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence )
        value = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
        change_first_primitive_element_in_list(sequence , value )
        arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
        assert get_base_dtype(arr.type ) == pa.int64()
@pytest.mark.parametrize('raise_exception' , [False, True] )
def test_arrow_writer_closes_stream ( raise_exception , tmp_path ):
    path = str(tmp_path / 'dataset-train.arrow' )
    try:
        with ArrowWriter(path=path ) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem ( mockfs ):
    path = 'mock://dataset-train.arrow'
    with ArrowWriter(path=path , storage_options=mockfs.storage_options ) as writer:
        assert isinstance(writer._fs , type(mockfs ) )
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({'col_1': 'foo', 'col_2': 1} )
        writer.write({'col_1': 'bar', 'col_2': 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path )
def test_parquet_writer_write ():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output ) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1} )
        writer.write({'col_1': 'bar', 'col_2': 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
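# Sketch (illustration): the Parquet output produced above is a regular Parquet
# payload, so any Parquet reader can consume it directly.
if __name__ == "__main__":
    buf = pa.BufferOutputStream()
    with ParquetWriter(stream=buf ) as demo_writer:
        demo_writer.write({'col_1': 'foo', 'col_2': 1} )
        demo_writer.finalize()
    print(pq.read_table(pa.BufferReader(buf.getvalue() ) ).to_pydict() )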
@require_pil
@pytest.mark.parametrize('embed_local_files' , [False, True] )
def test_writer_embed_local_files ( tmp_path , embed_local_files ):
    import PIL.Image
    image_path = str(tmp_path / 'test_image_rgb.jpg' )
    PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uint8 ) ).save(image_path , format='png' )
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output , features=Features({'image': Image()} ) , embed_local_files=embed_local_files ) as writer:
        writer.write({'image': image_path} )
        writer.finalize()
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out['image'][0]['path'] , str )
        with open(image_path , 'rb' ) as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable ():
    non_nullable_schema = pa.schema([pa.field('col_1' , pa.string() , nullable=False )] )
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output ) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema )
    assert writer._schema == pa.schema([pa.field('col_1' , pa.string() )] )
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester :
"""simple docstring"""
def __init__( self : Optional[int] ,snake_case : Any ,snake_case : Dict=100 ,snake_case : List[Any]=13 ,snake_case : str=30 ,snake_case : List[str]=2 ,snake_case : List[Any]=3 ,snake_case : Tuple=True ,snake_case : Optional[Any]=True ,snake_case : int=32 ,snake_case : Tuple=4 ,snake_case : List[Any]=4 ,snake_case : Optional[Any]=37 ,snake_case : Optional[Any]="gelu" ,snake_case : Tuple=0.1 ,snake_case : Union[str, Any]=0.1 ,snake_case : List[Any]=10 ,snake_case : Tuple=0.02 ,snake_case : List[str]=3 ,snake_case : Any=None ,snake_case : int=[0, 1, 2, 3] ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =100
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =num_channels
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =scope
SCREAMING_SNAKE_CASE =out_indices
SCREAMING_SNAKE_CASE =num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE =num_patches + 1
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCAmelCase ( self : Dict ):
return BeitConfig(
vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=snake_case ,initializer_range=self.initializer_range ,out_indices=self.out_indices ,)
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Tuple ,snake_case : Optional[Any] ,snake_case : Union[str, Any] ,snake_case : Optional[int] ):
SCREAMING_SNAKE_CASE =BeitModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int] ,snake_case : Dict ,snake_case : Any ,snake_case : List[str] ):
SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : Any ,snake_case : str ,snake_case : Any ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.type_sequence_label_size
SCREAMING_SNAKE_CASE =BeitForImageClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE =1
SCREAMING_SNAKE_CASE =BeitForImageClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self : List[str] ,snake_case : Tuple ,snake_case : str ,snake_case : Optional[int] ,snake_case : int ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =config_and_inputs
SCREAMING_SNAKE_CASE ={'pixel_values': pixel_values}
return config, inputs_dict
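# Worked example (illustration) of the sequence-length arithmetic commented
# above: with the tester defaults image_size=30 and patch_size=2 the model
# sees (30 // 2) ** 2 = 225 patches, hence seq_length = 226 with the [CLS] token.
if __name__ == "__main__":
    image_size, patch_size = 30, 2
    print((image_size // patch_size) ** 2 + 1 )  # 226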
@require_torch
class BeitModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =BeitModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,has_text_modality=snake_case ,hidden_size=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _lowerCAmelCase ( self : Union[str, Any] ):
pass
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
SCREAMING_SNAKE_CASE =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case ,nn.Linear ) )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE =['pixel_values']
self.assertListEqual(arg_names[:1] ,snake_case )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case )
def _lowerCAmelCase ( self : Any ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE =False
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.gradient_checkpointing_enable()
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =_config_zero_init(snake_case )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(config=snake_case )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@slow
def _lowerCAmelCase ( self : List[str] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =BeitModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class BeitModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : Tuple ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).pixel_values.to(snake_case )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE =torch.ones((1, 196) ,dtype=torch.bool ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(pixel_values=snake_case ,bool_masked_pos=snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(snake_case )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,snake_case ,atol=1e-2 ) )
@slow
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =281
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 21841) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =2396
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape ,snake_case )
        is_pillow_less_than_9 = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
        if is_pillow_less_than_9:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] ,device=snake_case ,)
else:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] ,device=snake_case ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,snake_case ,atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case ,target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,snake_case )
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case )
SCREAMING_SNAKE_CASE =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape ,snake_case )
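# Illustrative sketch (not part of the original tests): upsampling
# semantic-segmentation logits of the asserted shape (1, 150, 160, 160) to an
# arbitrary target size, which is what post_process_semantic_segmentation does
# internally; dummy logits are used so no checkpoint download is needed.
if __name__ == "__main__":
    dummy_logits = torch.randn(1 ,150 ,160 ,160 )
    upsampled = nn.functional.interpolate(dummy_logits ,size=(500, 300) ,mode='bilinear' ,align_corners=False )
    print(upsampled.argmax(dim=1 ).shape )  # torch.Size([1, 500, 300])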
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
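# Usage sketch (illustration, assumes transformers is installed): with the
# _LazyModule indirection above, heavy submodules are only imported on first
# attribute access rather than at package import time.
if __name__ == "__main__":
    from transformers.models import data2vec

    print(data2vec.Data2VecTextConfig)  # first access triggers the real import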
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker :
    """simple docstring"""
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
    def _forward_hook ( self , m , inputs: Tensor , outputs: Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : List[str] ,snake_case : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
    @property
    def parametrized ( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
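# Minimal sketch (illustration) of the Tracker above; call demo_tracker()
# manually to list the leaf modules that carry learnable parameters after a
# dummy forward pass through a small throwaway network.
def demo_tracker():
    demo_net = nn.Sequential(nn.Conv2d(3 ,8 ,3 ) ,nn.BatchNorm2d(8 ) ,nn.ReLU() )
    traced = Tracker(demo_net )(torch.randn(1 ,3 ,32 ,32 ) ).parametrized
    return [type(m ).__name__ for m in traced]  # ReLU is filtered out: no params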
@dataclass
class ModuleTransfer :
    """simple docstring"""
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )
    raise_if_mismatch: bool = True
    def __call__( self , x: Tensor ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip ,src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip ,dest_traced ) )
        if len(dest_traced ) != len(src_traced ) and self.raise_if_mismatch:
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                f' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced ,src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}' )
class FakeRegNetVisslWrapper ( nn.Module ):
"""simple docstring"""
    def __init__( self , model: nn.Module ):
        super().__init__()
        feature_blocks = []
        # - get the stem
        feature_blocks.append(('conv1', model.stem) )
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('block' ), f'Unexpected layer name {k}'
            block_index = len(feature_blocks ) + 1
            feature_blocks.append((f'res{block_index}', v) )
        self._feature_blocks = nn.ModuleDict(feature_blocks )
    def forward ( self , x: Tensor ):
        return get_trunk_forward_outputs(
            x ,out_feat_keys=None ,feature_blocks=self._feature_blocks ,)
class NameToFromModelFuncMap ( dict ):
    """simple docstring"""
    def convert_name_to_timm ( self , x: str ):
        x_split = x.split('-' )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
    def __getitem__( self , x: str ):
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x )
            val = partial(lambda: (timm.create_model(x ,pretrained=True ).eval(), None) )
        else:
            val = super().__getitem__(x )
        return val
class NameToOurModelFuncMap ( dict ):
    """simple docstring"""
    def __getitem__( self , x: str ):
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head ( from_state_dict, to_state_dict, keys ):
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(F'Copied key={from_key} to={to_key}' )
    return to_state_dict
def convert_weight_and_push ( name, from_model_func, our_model_func, config, save_directory, push_to_hub = True, ):
    """simple docstring"""
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model , from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys )
        our_model.load_state_dict(to_state_dict )
    our_outputs = our_model(x, output_hidden_states=True )
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification ) else our_outputs.last_hidden_state
    )
    from_output = from_model(x )
    from_output = from_output[-1] if type(from_output ) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output, our_output ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message='Add model', use_temp_dir=True, )
        size = 224 if 'seer' not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k', size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message='Add image processor', use_temp_dir=True, )
        print(F'Pushed {name}' )
def convert_weights_and_push ( save_directory, model_name = None, push_to_hub = True ):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset' ) ), 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id )
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url, model_func ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory ), map_location='cpu' )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['classy_state_dict']['base_model']['model']
        state_dict = model_state_dict['trunk']
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map['regnet-y-320-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetY32gf() ), )
    names_to_from_model_map['regnet-y-640-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetY64gf() ), )
    names_to_from_model_map['regnet-y-1280-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetY128gf() ), )
    names_to_from_model_map['regnet-y-10b-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
    # IN1K finetuned
    names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetY32gf() ), )
    names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetY64gf() ), )
    names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetY128gf() ), )
    names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, )
    return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def lowercase_ ( _A : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : List[str] = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
lowerCamelCase__ : Any = bytes(lowerCAmelCase_ , "utf-8" )
with zstd.open(lowerCAmelCase_ , "wb" ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture
def lowercase_ ( _A : Optional[int] ):
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , lowerCAmelCase_ ) , "w" ) as f:
f.write(lowerCAmelCase_ )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowercase_ ( _A : Optional[int] , _A : str , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
lowerCamelCase__ : str = input_paths[compression_format]
lowerCamelCase__ : List[Any] = tmp_path / "cache"
lowerCamelCase__ : int = DownloadConfig(cache_dir=lowerCAmelCase_ , extract_compressed_file=lowerCAmelCase_ )
lowerCamelCase__ : List[Any] = cached_path(lowerCAmelCase_ , download_config=lowerCAmelCase_ )
with open(lowerCAmelCase_ ) as f:
lowerCamelCase__ : Any = f.read()
with open(lowerCAmelCase_ ) as f:
lowerCamelCase__ : Tuple = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowercase_ ( _A : str , _A : Optional[Any] , _A : int , _A : Tuple , _A : Tuple ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = "custom_cache"
lowerCamelCase__ : int = "custom_extracted_dir"
lowerCamelCase__ : int = tmp_path / "custom_extracted_path"
if default_extracted:
lowerCamelCase__ : Any = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , lowerCAmelCase_ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(lowerCAmelCase_ ) )
lowerCamelCase__ : str = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
lowerCamelCase__ : Dict = xz_file
lowerCamelCase__ : Union[str, Any] = (
DownloadConfig(extract_compressed_file=lowerCAmelCase_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowerCAmelCase_ )
)
lowerCamelCase__ : Tuple = cached_path(lowerCAmelCase_ , download_config=lowerCAmelCase_ )
assert Path(lowerCAmelCase_ ).parent.parts[-2:] == expected
def test_cached_path_local ( text_file ):
    """simple docstring"""
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local ( tmp_path ):
    """simple docstring"""
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec ( tmpfs_file ):
    """simple docstring"""
    output_path = get_from_cache(F"tmp://{tmpfs_file}" )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , lowerCAmelCase_ )
def lowercase_ ( ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase_ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , lowerCAmelCase_ )
def lowercase_ ( _A : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Any = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCAmelCase_ ):
http_get("https://huggingface.co" , temp_file=lowerCAmelCase_ )
with pytest.raises(lowerCAmelCase_ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , lowerCAmelCase_ )
def lowercase_ ( _A : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCAmelCase_ ):
ftp_get("ftp://huggingface.co" , temp_file=lowerCAmelCase_ )
with pytest.raises(lowerCAmelCase_ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , lowerCAmelCase_ )
def lowercase_ ( _A : Dict ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCAmelCase_ ):
fsspec_get("s3://huggingface.co" , temp_file=lowerCAmelCase_ )
with pytest.raises(lowerCAmelCase_ ):
fsspec_head("s3://huggingface.co" )
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue', 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels', which is the name the models of the
    # transformers library expect
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length, or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want to pad to a round multiple of 8 (fp16/bf16) or 16 (fp8)
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args ):
    """simple docstring"""
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None ) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue', 'mrpc' )
    # New Code #
    # We can now define an inner training loop function. It should take a batch size as its only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
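    # (`find_executable_batch_size` re-runs the decorated function with a halved
    # batch size each time it catches an out-of-memory error, so an over-large
    # starting batch size recovers automatically instead of crashing the run.)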
    @find_executable_batch_size(starting_batch_size=batch_size )
    def inner_training_loop(batch_size ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed )
        # Instantiate the model (we build the model here so that the seed also controls the new weight initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr )
        train_dataloader , eval_dataloader = get_dataloaders(accelerator, batch_size )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader ) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
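                # `gather_for_metrics` gathers tensors from all processes and drops the
                # duplicated samples a distributed sampler may have padded in, so the
                # metric sees each example exactly once.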
                predictions , references = accelerator.gather_for_metrics((predictions, batch['labels']) )
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F'epoch {epoch}:', eval_metric )
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        ' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'
        ' and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args )
if __name__ == "__main__":
main()
| 334 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests( unittest.TestCase ):
"""simple docstring"""
@property
    def dummy_uncond_unet( self ):
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
    def test_inference( self ):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=generator ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=generator , return_dict=False )[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def test_sde_ve_pipeline( self ):
        model_id = 'google/ncsnpp-church-256'
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id )
        sde_ve = ScoreSdeVePipeline(unet=model , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=1_0 , output_type='numpy' , generator=generator ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_5_6, 2_5_6, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 53 |
def reverse_long_words(sentence: str ) -> str:
    """simple docstring"""
    return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 334 | 0 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class SegformerFeatureExtractor( SegformerImageProcessor ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
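# Minimal migration sketch (the checkpoint name below is illustrative only):
# from transformers import SegformerImageProcessor
# image_processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")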
| 52 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro( TestCasePlus ):
"""simple docstring"""
    def setUp( self ):
        super().setUp()
        data_cached = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,extract_compressed_file=True ,)
        self.data_dir = f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
    def test_model_download( self ):
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script( self ):
        env_vars_to_replace = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
        bash_script = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
        bash_script = bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k ,str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ['finetune.py'] + bash_script.split() + args
        with patch.object(sys ,'argv' ,testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationModule.add_model_specific_args(parser ,os.getcwd() )
            args = parser.parse_args()
            model = main(args )
# Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,float )
self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] ,17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_name = [x for x in contents if x.endswith('.ckpt' )][0]
        ckpt_path = os.path.join(args.output_dir ,ckpt_name )
        ckpt = torch.load(ckpt_path ,map_location='cpu' )
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class TestDistilMarianNoTeacher( TestCasePlus ):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script( self ):
        data_dir = f'{self.test_file_dir_str}/test_data/wmt_en_ro'
        env_vars_to_replace = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
        bash_script = (
            (self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
        )
        bash_script = bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
        bash_script = bash_script.replace('--fp16 ' ,' ' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k ,str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace('--fp16' ,'' )
        epochs = 6
        testargs = (
['distillation.py']
+ bash_script.split()
+ [
f'--output_dir={output_dir}',
'--gpus=1',
'--learning_rate=1e-3',
f'--num_train_epochs={epochs}',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
        with patch.object(sys ,'argv' ,testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationDistiller.add_model_specific_args(parser ,os.getcwd() )
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args )
# Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # check the model actually learned something
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,float )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_name = [x for x in contents if x.endswith('.ckpt' )][0]
        ckpt_path = os.path.join(args.output_dir ,ckpt_name )
        ckpt = torch.load(ckpt_path ,map_location='cpu' )
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 334 | 0 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
__a: List[Any] = logging.get_logger(__name__)
class SageMakerTrainer( Trainer ):
    '''simple docstring'''
    def __init__( self , args=None , **kwargs ) -> Any:
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''' , FutureWarning , )
        super().__init__(args=args , **kwargs )
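# Migration sketch (variable names are hypothetical): `Trainer` is the drop-in
# replacement the warning above points to, e.g.
# from transformers import Trainer
# trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)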
| 198 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = 'blip_2_vision_model'
    def __init__( self ,hidden_size=1408 ,intermediate_size=6144 ,num_hidden_layers=39 ,num_attention_heads=16 ,image_size=224 ,patch_size=14 ,hidden_act="gelu" ,layer_norm_eps=0.00_001 ,attention_dropout=0.0 ,initializer_range=1e-10 ,qkv_bias=True ,**kwargs ,):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path: Union[str, os.PathLike] ,**kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict ,**kwargs )
class BlipaQFormerConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = 'blip_2_qformer'
    def __init__( self ,vocab_size=30522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,position_embedding_type="absolute" ,cross_attention_frequency=2 ,encoder_hidden_size=1408 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path: Union[str, os.PathLike] ,**kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict ,**kwargs )
class BlipaConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = 'blip-2'
    is_composition = True
    def __init__( self ,vision_config=None ,qformer_config=None ,text_config=None ,num_query_tokens=32 ,**kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls ,vision_config: BlipaVisionConfig ,qformer_config: BlipaQFormerConfig ,text_config: PretrainedConfig ,**kwargs ,):
        return cls(
            vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**kwargs ,)
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 334 | 0 |
import random
def rabin_miller(num: int ) -> bool:
    '''simple docstring'''
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
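# Sanity-check sketch for the tester above (97 is prime, 91 = 7 * 13 is not;
# the test is probabilistic, so the second check can fail with tiny probability):
# assert rabin_miller(97) and not rabin_miller(91)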
def is_prime_low_num(num: int ) -> bool:
    '''simple docstring'''
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num )
def generate_large_prime(keysize: int = 1_024 ) -> int:
    '''simple docstring'''
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
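# Usage sketch (a tiny key size purely for illustration; real keys should keep
# the 1_024-bit default or larger):
# small_prime = generate_large_prime(keysize=16)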
if __name__ == "__main__":
    num = generate_large_prime()
    print(('Prime number:', num))
    print(('is_prime_low_num:', is_prime_low_num(num)))
| 29 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase ="\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase ="\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase ="\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
    def _compute( self ,predictions ,references ,p_features=None ,q_features=None ,p_tokens=None ,q_tokens=None ,num_buckets="auto" ,pca_max_data=-1 ,kmeans_explained_var=0.9 ,kmeans_num_redo=5 ,kmeans_max_iter=500 ,featurize_model_name="gpt2-large" ,device_id=-1 ,max_text_length=1024 ,divergence_curve_discretization_size=25 ,mauve_scaling_factor=5 ,verbose=True ,seed=25 ,):
        out = compute_mauve(
            p_text=predictions ,q_text=references ,p_features=p_features ,q_features=q_features ,p_tokens=p_tokens ,q_tokens=q_tokens ,num_buckets=num_buckets ,pca_max_data=pca_max_data ,kmeans_explained_var=kmeans_explained_var ,kmeans_num_redo=kmeans_num_redo ,kmeans_max_iter=kmeans_max_iter ,featurize_model_name=featurize_model_name ,device_id=device_id ,max_text_length=max_text_length ,divergence_curve_discretization_size=divergence_curve_discretization_size ,mauve_scaling_factor=mauve_scaling_factor ,verbose=verbose ,seed=seed ,)
        return out
| 334 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Tuple = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """xlm"""
    attribute_map = {
        """hidden_size""": """emb_dim""",
        """num_attention_heads""": """n_heads""",
        """num_hidden_layers""": """n_layers""",
        """n_words""": """vocab_size""", # For backward compatibility
    }
    def __init__( self , vocab_size=30_145 , emb_dim=2_048 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=512 , embed_init_std=2_048**-0.5 , layer_norm_eps=1E-12 , init_std=0.02 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["""n_words"""]
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 111 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = 'vit_mae'
    def __init__( self ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,image_size=224 ,patch_size=16 ,num_channels=3 ,qkv_bias=True ,decoder_num_attention_heads=16 ,decoder_hidden_size=512 ,decoder_num_hidden_layers=8 ,decoder_intermediate_size=2048 ,mask_ratio=0.75 ,norm_pix_loss=False ,**kwargs ,):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 334 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_lowerCAmelCase = logging.getLogger(__name__)
class NER( TokenClassificationTask ):
    '''simple docstring'''
    def __init__( self ,label_idx=-1 ) -> Any:
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
    def read_examples_from_file( self ,data_dir ,mode ) -> List[InputExample]:
        if isinstance(mode ,Split ):
            mode = mode.value
        file_path = os.path.join(data_dir ,F"""{mode}.txt""" )
        guid_index = 1
        examples = []
        with open(file_path ,encoding="""utf-8""" ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=words ,labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(""" """ )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace("""\n""" ,"""""" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("""O""" )
            if words:
                examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=words ,labels=labels ) )
        return examples
    def write_predictions_to_file( self ,writer: TextIO ,test_input_reader: TextIO ,preds_list: List ):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
                writer.write(output_line )
            else:
                logger.warning("""Maximum sequence length exceeded: No prediction for \'%s\'.""" ,line.split()[0] )
    def get_labels( self ,path: str ) -> List[str]:
        if path:
            with open(path ,"""r""" ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["""O"""] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk( NER ):
    '''simple docstring'''
    def __init__( self ) -> Optional[int]:
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )
    def get_labels( self ,path: str ) -> List[str]:
        if path:
            with open(path ,"""r""" ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["""O"""] + labels
            return labels
        else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS( TokenClassificationTask ):
    '''simple docstring'''
    def read_examples_from_file( self ,data_dir ,mode ) -> List[InputExample]:
        if isinstance(mode ,Split ):
            mode = mode.value
        file_path = os.path.join(data_dir ,F"""{mode}.txt""" )
        guid_index = 1
        examples = []
        with open(file_path ,encoding="""utf-8""" ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["""form"""] )
                    labels.append(token["""upos"""] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=words ,labels=labels ) )
                    guid_index += 1
        return examples
    def write_predictions_to_file( self ,writer: TextIO ,test_input_reader: TextIO ,preds_list: List ):
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = """"""
            for token in sentence:
                out += F"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
            out += "\n"
            writer.write(out )
            example_id += 1
    def get_labels( self ,path: str ) -> List[str]:
        if path:
            with open(path ,"""r""" ) as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
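# Format note for the readers above: NER/Chunk expect CoNLL-2003-style files with
# one "token label ..." pair per line and blank lines between sentences, while
# POS expects CoNLL-U files consumed through `parse_incr`.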
| 37 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 | 0 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest( TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = 'pt'
        self.framework_tf = 'tf'
    def _setup_pt_ckpt( self , model_path ):
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt( self , model_path ):
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
    def test_framework_provided( self ):
        framework = 'mock_framework'
        # Framework provided - return whatever the user provides
        mock_framework = FeaturesManager.determine_framework(self.test_model , framework )
        self.assertEqual(mock_framework , framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            mock_framework = FeaturesManager.determine_framework(local_pt_ckpt , framework )
            self.assertEqual(mock_framework , framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            mock_framework = FeaturesManager.determine_framework(local_tf_ckpt , framework )
            self.assertEqual(mock_framework , framework )
    def test_checkpoint_provided( self ):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment( self ):
        # TensorFlow not in environment -> use PyTorch
        mock_tf = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_torch_available' , mock_torch ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf = MagicMock(return_value=True )
        mock_torch = MagicMock(return_value=True )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf = MagicMock(return_value=False )
        mock_torch = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 284 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest( TestCase ):
    """simple docstring"""
    def test_no_type( self ):
        arr = pa.array(TypedSequence([1, 2, 3] ) )
        self.assertEqual(arr.type ,pa.int64() )
    def test_array_type_forbidden( self ):
        with self.assertRaises(ValueError ):
            arr = pa.array(TypedSequence([1, 2, 3] ) ,type=pa.int64() )
    def test_try_type_and_type_forbidden( self ):
        with self.assertRaises(ValueError ):
            arr = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
    def test_compatible_type( self ):
        arr = pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
        self.assertEqual(arr.type ,pa.int32() )
    def test_incompatible_type( self ):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            arr = pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
    def test_try_compatible_type( self ):
        arr = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
        self.assertEqual(arr.type ,pa.int32() )
    def test_try_incompatible_type( self ):
        arr = pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
        self.assertEqual(arr.type ,pa.string() )
    def test_compatible_extension_type( self ):
        arr = pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) )
        self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
    def test_incompatible_extension_type( self ):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            arr = pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) )
    def test_try_compatible_extension_type( self ):
        arr = pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
        self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
    def test_try_incompatible_extension_type( self ):
        arr = pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
        self.assertEqual(arr.type ,pa.string() )
    @require_pil
    def test_exhaustive_cast( self ):
        import PIL.Image
        pil_image = PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) )
        with patch(
            'datasets.arrow_writer.cast_to_python_objects' ,side_effect=cast_to_python_objects ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] ,type=Image() ) )
            args , kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn('optimize_list_casting' ,kwargs )
            self.assertFalse(kwargs['optimize_list_casting'] )
def _check_output(output, expected_num_chunks ):
    """simple docstring"""
    stream = pa.BufferReader(output ) if isinstance(output, pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}] )
def test_write(fields, writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size ) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1} )
        writer.write({'col_1': 'bar', 'col_2': 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata )
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features():
    """simple docstring"""
    output = pa.BufferOutputStream()
    features = Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
    with ArrowWriter(stream=output, features=features ) as writer:
        writer.write({'labels': 0} )
        writer.write({'labels': 1} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue() )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
def test_key_datatype(writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt='split_name', check_duplicates=True, ) as writer:
        with pytest.raises(InvalidKeyError ):
            writer.write({'col_1': 'foo', 'col_2': 1}, key=[1, 2] )
        num_examples , num_bytes = writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def test_duplicate_keys(writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt='split_name', check_duplicates=True, ) as writer:
        with pytest.raises(DuplicatedKeysError ):
            writer.write({'col_1': 'foo', 'col_2': 1}, key=10 )
            writer.write({'col_1': 'bar', 'col_2': 2}, key=10 )
        num_examples , num_bytes = writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def test_write_with_keys(writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt='split_name', check_duplicates=True, ) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1}, key=1 )
        writer.write({'col_1': 'bar', 'col_2': 2}, key=2 )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}] )
def test_write_batch(fields, writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size ) as writer:
        writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
        writer.write_batch({'col_1': [], 'col_2': []} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata )
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}] )
def test_write_table(fields, writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size ) as writer:
        writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata )
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}] )
def snake_case__ ( writer_batch_size, fields ):
    """simple docstring"""
    output =pa.BufferOutputStream()
    schema =pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size ) as writer:
        writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
        writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
        num_examples, num_bytes =writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields ={'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata )
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields ={'col_1': pa.string(), 'col_2': pa.int64()}
        output =os.path.join(tmp_dir, 'test.arrow' )
        with ArrowWriter(path=output, schema=pa.schema(fields ) ) as writer:
            writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
            num_examples, num_bytes =writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata )
        _check_output(output, 1 )
def get_base_dtype( arr_type ):
    """simple docstring"""
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
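# Quick illustration of the helper above: it unwraps arbitrarily nested
# Arrow list types down to the primitive element dtype (pure pyarrow).
def _example_get_base_dtype():
    nested =pa.list_(pa.list_(pa.int64() ) )
    assert get_base_dtype(nested ) == pa.int64()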
def change_first_primitive_element_in_list( lst, value ):
    """simple docstring"""
    if isinstance(lst[0], list ):
        change_first_primitive_element_in_list(lst[0], value )
    else:
        lst[0] =value
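# Tiny demo of the in-place helper above: only the first primitive element
# of the (possibly nested) list is replaced.
def _example_change_first_element():
    nested_list =[[1, 2, 3]]
    change_first_primitive_element_in_list(nested_list, 99 )
    assert nested_list == [[99, 2, 3]]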
@pytest.mark.parametrize('optimized_int_type, expected_dtype', [(None, pa.int64()), (Value('int32' ), pa.int32())] )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( sequence, optimized_int_type, expected_dtype ):
    """simple docstring"""
    arr =pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type ) )
    assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    'col, expected_dtype', [
        ('attention_mask', pa.int8()),
        ('special_tokens_mask', pa.int8()),
        ('token_type_ids', pa.int8()),
        ('input_ids', pa.int32()),
        ('other', pa.int64()),
    ], )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( sequence, col, expected_dtype ):
    """simple docstring"""
    arr =pa.array(OptimizedTypedSequence(sequence, col=col ) )
    assert get_base_dtype(arr.type ) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence =copy.deepcopy(sequence )
        value =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
        change_first_primitive_element_in_list(sequence, value )
        arr =pa.array(OptimizedTypedSequence(sequence, col=col ) )
        assert get_base_dtype(arr.type ) == pa.int64()
@pytest.mark.parametrize('raise_exception', [False, True] )
def snake_case__ ( raise_exception, tmp_path ):
    """simple docstring"""
    path =str(tmp_path / 'dataset-train.arrow' )
    try:
        with ArrowWriter(path=path ) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def snake_case__ ( mockfs ):
    """simple docstring"""
    path ='mock://dataset-train.arrow'
    with ArrowWriter(path=path, storage_options=mockfs.storage_options ) as writer:
        assert isinstance(writer._fs, type(mockfs ) )
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({'col_1': 'foo', 'col_2': 1} )
        writer.write({'col_1': 'bar', 'col_2': 2} )
        num_examples, num_bytes =writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path )
def snake_case__ ( ):
"""simple docstring"""
    output =pa.BufferOutputStream()
    with ParquetWriter(stream=output ) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1} )
        writer.write({'col_1': 'bar', 'col_2': 2} )
        num_examples, num_bytes =writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    buffer =pa.BufferReader(output.getvalue() )
    pa_table =pq.read_table(buffer )
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
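# Hedged follow-up sketch using plain pyarrow only: the same Parquet
# write/read round trip without any datasets-specific machinery.
def _example_parquet_roundtrip():
    sink =pa.BufferOutputStream()
    pq.write_table(pa.table({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ), sink )
    table =pq.read_table(pa.BufferReader(sink.getvalue() ) )
    assert table.to_pydict() == {'col_1': ['foo', 'bar'], 'col_2': [1, 2]}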
@require_pil
@pytest.mark.parametrize('embed_local_files', [False, True] )
def snake_case__ ( tmp_path, embed_local_files ):
    """simple docstring"""
    import PIL.Image
    image_path =str(tmp_path / 'test_image_rgb.jpg' )
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8 ) ).save(image_path, format='png' )
    output =pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({'image': Image()} ), embed_local_files=embed_local_files ) as writer:
        writer.write({'image': image_path} )
        writer.finalize()
    buffer =pa.BufferReader(output.getvalue() )
    pa_table =pq.read_table(buffer )
    out =pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out['image'][0]['path'], str )
        with open(image_path, 'rb' ) as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def snake_case__ ( ):
"""simple docstring"""
    non_nullable_schema =pa.schema([pa.field('col_1', pa.string(), nullable=False )] )
    output =pa.BufferOutputStream()
    with ArrowWriter(stream=output ) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema )
    assert writer._schema == pa.schema([pa.field('col_1', pa.string() )] )
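# Plain-pyarrow note on why the assertion above holds: a field built without
# `nullable=` defaults to nullable=True, so the rebuilt schema drops the flag.
def _example_field_nullability():
    assert pa.field('col_1', pa.string() ).nullable is True
    assert pa.field('col_1', pa.string(), nullable=False ).nullable is False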
import logging
import os
from .state import PartialState
class MultiProcessAdapter ( logging.LoggerAdapter ):
    @staticmethod
    def _should_log ( main_process_only ):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log ( self , level , msg , *args , **kwargs ):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
        main_process_only = kwargs.pop('''main_process_only''' , True )
        in_order = kwargs.pop('''in_order''' , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def get_logger ( name , log_level = None ):
    """simple docstring"""
    if log_level is None:
        log_level = os.environ.get('''ACCELERATE_LOG_LEVEL''' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
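# Hedged usage sketch for the adapter above (mirrors accelerate's public
# `get_logger` helper); the distributed state must be initialized first.
def _example_get_logger():
    PartialState()  # single-process init; normally done by Accelerator()
    logger = get_logger(__name__ , log_level='INFO' )
    logger.info('printed once per run' , main_process_only=True )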
def or_gate( input_1, input_2 ):
    """simple docstring"""
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate( ):
"""simple docstring"""
assert or_gate(0, 0 ) == 0
assert or_gate(0, 1 ) == 1
assert or_gate(1, 0 ) == 1
assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
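# A small composition example: an n-input OR folded from the two-input gate
# above with functools.reduce (standard pattern, no new assumptions).
from functools import reduce
def or_gate_n( *inputs ):
    return reduce(or_gate, inputs )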
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def _snake_case ( files , tmp_path_factory ):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def _snake_case ( dataset_info , tmp_path ):
    tmp_path_str = str(tmp_path )
    dataset_info.write_to_directory(tmp_path_str )
    reloaded = DatasetInfo.from_directory(tmp_path_str )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path_str , 'dataset_info.json' ) )
def _snake_case ( ):
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def _snake_case ( ):
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def _snake_case ( dataset_infos_dict , tmp_path ):
    dataset_infos_dir = str(tmp_path )
    dataset_infos_dict.write_to_directory(dataset_infos_dir )
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir , 'README.md' ) )
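# Hedged round-trip sketch of the API tested above: write a DatasetInfo to a
# directory and read it back (the directory argument is illustrative).
def _example_info_roundtrip(tmp_dir ):
    info = DatasetInfo(description='demo' , dataset_size=42 )
    info.write_to_directory(tmp_dir )
    assert DatasetInfo.from_directory(tmp_dir ) == info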
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"vocab_file": "vocab.txt"}
_lowerCamelCase ={
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_lowerCamelCase ={
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def load_vocab_file( vocab_file ):
    """simple docstring"""
    with open(vocab_file, 'r' ) as f:
        lines =f.read().splitlines()
    return [l.strip() for l in lines]
class a_ ( PreTrainedTokenizer ):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ['input_ids', 'attention_mask']
def __init__( self : int ,snake_case : Dict ,snake_case : Dict="<unk>" ,snake_case : Optional[int]="<cls>" ,snake_case : Optional[int]="<pad>" ,snake_case : int="<mask>" ,snake_case : Optional[int]="<eos>" ,**snake_case : List[str] ,):
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE =load_vocab_file(snake_case )
SCREAMING_SNAKE_CASE =dict(enumerate(self.all_tokens ) )
SCREAMING_SNAKE_CASE ={tok: ind for ind, tok in enumerate(self.all_tokens )}
SCREAMING_SNAKE_CASE =unk_token
SCREAMING_SNAKE_CASE =cls_token
SCREAMING_SNAKE_CASE =pad_token
SCREAMING_SNAKE_CASE =mask_token
SCREAMING_SNAKE_CASE =eos_token
SCREAMING_SNAKE_CASE =self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : int ):
return self._id_to_token.get(snake_case ,self.unk_token )
def _lowerCAmelCase ( self : Dict ,snake_case : str ):
return self._token_to_id.get(snake_case ,self._token_to_id.get(self.unk_token ) )
def _lowerCAmelCase ( self : Tuple ,snake_case : List[str] ,**snake_case : Any ):
return text.split()
def _lowerCAmelCase ( self : Optional[int] ,snake_case : str=False ):
return len(self._id_to_token )
def _lowerCAmelCase ( self : List[str] ):
return {token: i for i, token in enumerate(self.all_tokens )}
def _lowerCAmelCase ( self : List[Any] ,snake_case : str ):
return self._token_to_id.get(snake_case ,self._token_to_id.get(self.unk_token ) )
def _lowerCAmelCase ( self : Any ,snake_case : int ):
return self._id_to_token.get(snake_case ,self.unk_token )
def _lowerCAmelCase ( self : List[str] ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE =[self.cls_token_id]
SCREAMING_SNAKE_CASE =[self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def _lowerCAmelCase ( self : Optional[int] ,snake_case : List ,snake_case : Optional[List] = None ,snake_case : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
SCREAMING_SNAKE_CASE =[1] + ([0] * len(snake_case )) + [1]
if token_ids_a is not None:
mask += [0] * len(snake_case ) + [1]
return mask
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Dict ,snake_case : Any ):
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,(filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
with open(snake_case ,'w' ) as f:
f.write('\n'.join(self.all_tokens ) )
return (vocab_file,)
@property
def _lowerCAmelCase ( self : int ):
return self.get_vocab_size(with_added_tokens=snake_case )
def _lowerCAmelCase ( self : str ,snake_case : Union[List[str], List[AddedToken]] ,snake_case : bool = False ):
return super()._add_tokens(snake_case ,special_tokens=snake_case )
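# Small, hedged demo of the module-level vocab loader above: it returns the
# stripped lines of the file (path and tokens are illustrative only).
def _example_load_vocab(tmp_dir ):
    vocab_path =os.path.join(tmp_dir ,'vocab.txt' )
    with open(vocab_path ,'w' ) as f:
        f.write('<cls>\nA \nG\n' )
    assert load_vocab_file(vocab_path ) == ['<cls>', 'A', 'G']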
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_UpperCamelCase = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_UpperCamelCase = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_UpperCamelCase = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_UpperCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
_UpperCamelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
_UpperCamelCase = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
_UpperCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
_UpperCamelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
_UpperCamelCase = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast ( BertTokenizerFast ):
_SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Any = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Dict = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE : List[Any] = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast ( BertTokenizerFast ):
_SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Optional[int] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE : Tuple = DPRQuestionEncoderTokenizer
_UpperCamelCase = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
_UpperCamelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
_UpperCamelCase = r'''\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Return:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '''
@add_start_docstrings(_UpperCamelCase )
class CustomDPRReaderTokenizerMixin:
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Any:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
elif titles is None or texts is None:
__UpperCAmelCase : str = titles if texts is None else texts
return super().__call__(
__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCAmelCase : Union[str, Any] = titles if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) else [titles]
__UpperCAmelCase : Optional[int] = texts if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) else [texts]
__UpperCAmelCase : Tuple = len(__UpperCAmelCase )
__UpperCAmelCase : List[Any] = questions if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) else [questions] * n_passages
assert len(__UpperCAmelCase ) == len(
__UpperCAmelCase ), f'There should be as many titles than texts but got {len(__UpperCAmelCase )} titles and {len(__UpperCAmelCase )} texts.'
__UpperCAmelCase : Any = super().__call__(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )["""input_ids"""]
__UpperCAmelCase : Optional[Any] = super().__call__(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )["""input_ids"""]
__UpperCAmelCase : Tuple = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCAmelCase , __UpperCAmelCase )
]
}
if return_attention_mask is not False:
__UpperCAmelCase : Union[str, Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__UpperCAmelCase : List[Any] = attention_mask
return self.pad(__UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 16 , __UpperCAmelCase = 64 , __UpperCAmelCase = 4 , ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = reader_input["""input_ids"""]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = reader_output[:3]
__UpperCAmelCase : int = len(__UpperCAmelCase )
__UpperCAmelCase : str = sorted(range(__UpperCAmelCase ) , reverse=__UpperCAmelCase , key=relevance_logits.__getitem__ )
__UpperCAmelCase : Any = []
for doc_id in sorted_docs:
__UpperCAmelCase : Any = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__UpperCAmelCase : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__UpperCAmelCase : Tuple = sequence_ids.index(self.pad_token_id )
else:
__UpperCAmelCase : str = len(__UpperCAmelCase )
__UpperCAmelCase : List[str] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCAmelCase , top_spans=__UpperCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCAmelCase , start_index=__UpperCAmelCase , end_index=__UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__UpperCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Any = []
for start_index, start_score in enumerate(__UpperCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__UpperCAmelCase : Dict = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x[1] , reverse=__UpperCAmelCase )
__UpperCAmelCase : str = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
__UpperCAmelCase : List[str] = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__UpperCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCamelCase )
class DPRReaderTokenizerFast ( CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
_SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : int = READER_PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Tuple = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : List[str] = READER_PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE : Optional[int] = ["input_ids", "attention_mask"]
_SCREAMING_SNAKE_CASE : List[str] = DPRReaderTokenizer
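# Hedged usage sketch for the reader tokenizer above (running this downloads
# a real checkpoint from the Hugging Face Hub):
def _example_dpr_reader():
    tok = DPRReaderTokenizerFast.from_pretrained('facebook/dpr-reader-single-nq-base' )
    enc = tok(questions='what is love?' , titles='Haddaway' , texts='"What Is Love" is a 1993 song.' )
    assert 'input_ids' in enc and 'attention_mask' in enc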
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
    def _forward_hook ( self ,m : Any ,inputs : Tensor ,outputs : Tensor ):
        has_not_submodules =len(list(m.modules() ) ) == 1 or isinstance(m ,nn.Conv2d ) or isinstance(m ,nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : int ,snake_case : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
@property
def _lowerCAmelCase ( self : Tuple ):
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 0
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
    def __call__( self : int ,x : Tensor ):
        dest_traced =Tracker(self.dest )(x ).parametrized
        src_traced =Tracker(self.src )(x ).parametrized
        src_traced =list(filter(lambda m : type(m ) not in self.src_skip ,src_traced ) )
        dest_traced =list(filter(lambda m : type(m ) not in self.dest_skip ,dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                f' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced ,src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}' )
def convert_weight_and_push( name, config, save_directory, push_to_hub = True ):
    """simple docstring"""
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model =timm.create_model(name, pretrained=True ).eval()
        our_model =ResNetForImageClassification(config ).eval()
        module_transfer =ModuleTransfer(src=from_model, dest=our_model )
        x =torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ), our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name =F'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push( save_directory, model_name = None, push_to_hub = True ):
    """simple docstring"""
    filename ='imagenet-1k-id2label.json'
    num_labels =1000
    expected_shape =(1, num_labels)
    repo_id ='huggingface/label-files'
    idalabel =json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    idalabel ={int(k ): v for k, v in idalabel.items()}
    labelaid ={v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig =partial(ResNetConfig, num_labels=num_labels, id2label=idalabel, label2id=labelaid )
    names_to_config ={
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub )
return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
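# Example invocation (hedged: the script filename below is a placeholder).
# Note that argparse's `type=bool` treats any non-empty string as True, so
# leave `--push_to_hub` unset rather than passing "False" to disable it.
#
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./resnet-dump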
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
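# Hedged illustration of the lazy pattern above: importing the package is
# cheap, and the heavy submodule only loads on first attribute access.
#
#   import transformers.models.wav2vec2_with_lm as m  # nothing heavy imported yet
#   m.Wav2Vec2ProcessorWithLM                         # triggers the real import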
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs( token, num_runs=7 ):
    """simple docstring"""
    headers =None
    if token is not None:
        headers ={'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id ='636036'
    url =F'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result =requests.get(url, headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs( token ):
    """simple docstring"""
    workflow_runs =get_daily_ci_runs(token )
    workflow_run_id =None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id =workflow_run['id']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts( artifact_names, output_dir, token ):
    """simple docstring"""
    workflow_run_id =get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links =get_artifacts_links(worflow_run_id=workflow_run_id, token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url =artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token )
def get_last_daily_ci_reports( artifact_names, output_dir, token ):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token )
    results ={}
    for artifact_name in artifact_names:
        artifact_zip_path =os.path.join(output_dir, F'{artifact_name}.zip' )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] ={}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] =f.read().decode('UTF-8' )
    return results
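# Hedged usage sketch (needs a GitHub token with read access to the repo;
# the artifact name below is illustrative, not the real workflow's):
def _example_fetch_reports(token ):
    return get_last_daily_ci_reports(
        artifact_names=['ci_results'], output_dir='ci_artifacts', token=token )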
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Union[str, Any] =logging.get_logger(__name__)
a__ : Dict ={
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig ( PretrainedConfig ):
"""simple docstring"""
    model_type ="data2vec-text"
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig ( OnnxConfig ):
"""simple docstring"""
    @property
    def inputs ( self ):
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
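# Hedged usage sketch for the config above: instantiate with defaults and
# check a couple of fields (pure construction, nothing downloaded).
def _example_data2vec_config():
    config = Data2VecTextConfig()
    assert config.vocab_size == 3_0_5_2_2
    assert config.hidden_size == 7_6_8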
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
"""simple docstring"""
def __init__( self : Tuple ,snake_case : Optional[int] ,snake_case : Dict=13 ,snake_case : str=7 ,snake_case : Dict=True ,snake_case : List[Any]=True ,snake_case : Dict=False ,snake_case : int=True ,snake_case : Dict=99 ,snake_case : int=32 ,snake_case : List[str]=5 ,snake_case : Optional[Any]=4 ,snake_case : Tuple=64 ,snake_case : List[Any]="gelu" ,snake_case : str=0.1 ,snake_case : str=0.1 ,snake_case : List[str]=512 ,snake_case : List[str]=16 ,snake_case : str=2 ,snake_case : Dict=0.02 ,snake_case : Optional[int]=3 ,snake_case : int=4 ,snake_case : Any=None ,snake_case : Union[str, Any]=2 ,snake_case : List[Any]=2 ,snake_case : Optional[int]=2 ,snake_case : Dict=2 ,snake_case : List[str]=4 ,snake_case : int=1 ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =seq_length
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_input_mask
SCREAMING_SNAKE_CASE =use_token_type_ids
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =type_vocab_size
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =num_choices
SCREAMING_SNAKE_CASE =scope
SCREAMING_SNAKE_CASE =q_groups
SCREAMING_SNAKE_CASE =k_groups
SCREAMING_SNAKE_CASE =v_groups
SCREAMING_SNAKE_CASE =post_attention_groups
SCREAMING_SNAKE_CASE =intermediate_groups
SCREAMING_SNAKE_CASE =output_groups
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Optional[int] ):
return SqueezeBertConfig(
embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Optional[Any] ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : str ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : List[Any] ,snake_case : int ,snake_case : Any ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =SqueezeBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Tuple ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : Dict ,snake_case : Optional[Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,start_positions=snake_case ,end_positions=snake_case )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Tuple ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Any ,snake_case : Tuple ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Tuple ,snake_case : Dict ,snake_case : str ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : List[str] ,snake_case : Dict ,snake_case : str ,snake_case : Union[str, Any] ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.num_choices
SCREAMING_SNAKE_CASE =SqueezeBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,labels=snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) =config_and_inputs
SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SqueezeBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCAmelCase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = False
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,dim=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*snake_case )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*snake_case )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*snake_case )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*snake_case )
@slow
def _lowerCAmelCase ( self : str ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =SqueezeBertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_sentencepiece
@require_tokenizers
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
SCREAMING_SNAKE_CASE =torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE =model(snake_case )[0]
SCREAMING_SNAKE_CASE =torch.Size((1, 3) )
self.assertEqual(output.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(snake_case ,snake_case ,atol=1e-4 ) )
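# Hedged offline sketch: a randomly initialized SqueezeBERT forward pass to
# sanity-check shapes (requires torch; no pretrained weights are fetched).
def _example_squeezebert_forward():
    config = SqueezeBertConfig(
        vocab_size=100 , embedding_size=32 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=64 )
    model = SqueezeBertModel(config ).eval()
    input_ids = torch.randint(0 , 100 , (1, 8) )
    with torch.no_grad():
        output = model(input_ids )
    assert output.last_hidden_state.shape == (1, 8, 32)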
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : Any = """▁"""
__lowerCamelCase : Union[str, Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
_UpperCAmelCase :Union[str, Any] = BertGenerationTokenizer
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :str = True
def __UpperCamelCase( self ):
'''simple docstring'''
super().setUp()
UpperCamelCase : Optional[Any] = BertGenerationTokenizer(A_ , keep_accents=A_ )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = "<s>"
UpperCamelCase : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(A_ ) , 1002 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = BertGenerationTokenizer(A_ , keep_accents=A_ )
UpperCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(A_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) , [285, 46, 10, 170, 382] , )
UpperCamelCase : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCamelCase : Dict = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __UpperCamelCase( self ):
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = "Hello World!"
UpperCamelCase : List[str] = [1_8536, 2260, 101]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
UpperCamelCase : Dict = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@require_torch
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
UpperCamelCase : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCamelCase : str = " ".join(A_ )
UpperCamelCase : Optional[int] = self.big_tokenizer.encode_plus(A_ , return_tensors="pt" , return_token_type_ids=A_ )
UpperCamelCase : List[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=A_ )
UpperCamelCase : Union[str, Any] = BertGenerationConfig()
UpperCamelCase : Any = BertGenerationEncoder(A_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**A_ )
model(**A_ )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = {"input_ids": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 52 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    """simple docstring"""

    feature_extraction_class = None
    feat_extract_tester = None
    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
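
    # Illustrative sketch (not part of the original file): a concrete test case
    # would mix this class in and fill the two class attributes, e.g.
    #
    #   class MyFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    #       feature_extraction_class = MyFeatureExtractor      # hypothetical extractor
    #       feat_extract_tester = MyFeatureExtractionTester()  # hypothetical tester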
| 334 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
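
# For example, for i == 0 the loop above yields pairs such as
# ("encoder.deit.blocks.0.norm1.weight", "encoder.encoder.layer.0.layernorm_before.weight"),
# i.e. original DeiT parameter names mapped to their HuggingFace equivalents.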
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""")

        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
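
# Usage sketch: rename_key(state_dict, "encoder.deit.norm.weight", "encoder.layernorm.weight")
# pops the tensor stored under the old key and re-inserts it under the new one.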
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'''  # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase__ : List[Any] = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
lowercase__ : Optional[Any] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError('''Should either find 'base' or 'large' in checkpoint URL''')

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = '''relu'''
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''', check_hash=True)['''model''']

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('''decoder''') and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-large''')
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors='''pt''').pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311])
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170])
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210])
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535])

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1E-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving processor to {pytorch_dump_folder_path}""")
    processor.save_pretrained(pytorch_dump_folder_path)
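
# Example invocation (script filename and output folder are illustrative):
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten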
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 198 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item, main_target):
    """Score an item by counting position-wise matches against the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
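
# For example, evaluate("abcd", "abcde") returns ("abcd", 4.0): all four
# characters match the target position-wise, so the raw fitness score is 4.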
def crossover(parent_1, parent_2):
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
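
# For example, with random_slice == 2, crossover("aaaa", "bbbb") returns
# ("aabb", "bbaa"): each child takes its head from one parent and its tail
# from the other.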
def mutate(child, genes):
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1, population_score, genes):
    """Breed a parent with random partners, proportionally to its fitness."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
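
# For example, a parent with normalized score 0.34 yields int(0.34 * 100) + 1 == 35,
# which is capped to 10 crossovers, i.e. up to 20 mutated children appended to pop.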
def basic(target, genes, debug=True):
    """Run the evolution until a perfect match of the target is found."""
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population: {total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}')

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
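
# Minimal usage sketch (convergence speed depends on the random seed):
#   >>> generation, total_population, best = basic("cat", list("catdog"), debug=False)
#   >>> best
#   'cat'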
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
| 334 | 0 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': F'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = F'{line["duration"]:.4f}'
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
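
# At this point each group_info entry has the shape
# [log_file_name, num_failed_in_that_log, [[test_nodeid, duration, suite_prefix], ...]].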
message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
        failed_table = []
        filesafailed = {}
        for test in failed_tests:
            data = test[0].split('::')
            data[0] = data[0].split('/')[-1]
            if data[0] not in filesafailed:
                filesafailed[data[0]] = [data[1:]]
            else:
                filesafailed[data[0]] += [data[1:]]
            failed_table.append(data)
        files = [test[0] for test in failed_table]
        individual_files = list(set(files))
        # Count number of instances in failed_tests
        table = []
        for file in individual_files:
            table.append([file, len(filesafailed[file])])
        failed_table = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3000 - offset] + F'\n...\n```\n{err}'
        print(F'### {message}')
    else:
        message = 'No failed tests! 🤗'
print(F'## {message}')
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
    if message != "No failed tests! 🤗":
        md_report = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
    action_button = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': F'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
    date_report = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': F'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': F'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 29 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
"""simple docstring"""
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained('gpt2')

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained('gpt2')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, 'wb') as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model', f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('tokenizer.json'):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('tokenizer.json', 'wb') as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json', f)
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove('tokenizer.json')

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
"""simple docstring"""
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub('test-tokenizer', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id='test-tokenizer')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id='test-tokenizer', push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub('valid_org/test-tokenizer-org', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-tokenizer-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id='valid_org/test-tokenizer-org', push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizerFast')

        tokenizer = AutoTokenizer.from_pretrained(
            f'{USER}/test-dynamic-tokenizer', use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')
class TrieTest(unittest.TestCase):
"""simple docstring"""
    def test_trie(self):
        trie = Trie()
        trie.add('Hello 友達')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
        trie.add('Hello')
        trie.data
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS] This is a extra_id_100'])
        trie.add('[CLS]')
        trie.add('extra_id_1')
        trie.add('extra_id_100')
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS]', ' This is a ', 'extra_id_100'])

    def test_trie_single(self):
        trie = Trie()
        trie.add('A')
        self.assertEqual(trie.split('ABC'), ['A', 'BC'])
        self.assertEqual(trie.split('BCA'), ['BC', 'A'])

    def test_trie_final(self):
        trie = Trie()
        trie.add('TOKEN]')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add('A')
        trie.add('P')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add('AB')
        trie.add('B')
        trie.add('C')
        self.assertEqual(trie.split('ABC'), ['AB', 'C'])

    def test_trie_skip(self):
        trie = Trie()
        trie.add('ABC')
        trie.add('B')
        trie.add('CD')
        self.assertEqual(trie.split('ABCD'), ['ABC', 'D'])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text('ABC', [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ['AB', 'C'])
| 334 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, """schedule.bin""")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
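
# For example, unwrap_schedule(scheduler, 3) returns the first three learning rates,
# stepping the scheduler once after each read; the save/reload variant additionally
# round-trips the scheduler state through "schedule.bin" halfway through the steps.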
@require_torch
class OptimizationTest(unittest.TestCase):
'''simple docstring'''
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2E-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1E-2, eps=(1E-30, 1E-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1_000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
'''simple docstring'''
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1E-2, msg=f'''failed for {scheduler_func} in normal scheduler''', )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f'''failed for {scheduler_func} in save and reload''')
class LambdaScheduleWrapper:
    """Wraps an lr lambda so the resulting schedule remains picklable."""
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(self, scheduler):
        scheduler.lr_lambdas = list(map(self, scheduler.lr_lambdas))
| 111 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='cpu')
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d['visual_bert.embeddings.position_ids'] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['cls.predictions.decoder.bias'] = new_d['cls.predictions.bias']
    return new_d
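
# For example, the ("bert.bert", "visual_bert") prefix pair rewrites a key like
# "bert.bert.encoder.layer.0.attention.self.query.weight" into
# "visual_bert.encoder.layer.0.attention.self.query.weight".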
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split('/')[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'

    # Get Config
    if "pre" in checkpoint_path:
        model_type = 'pretraining'
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 1024}
        else:
            raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.')
    else:
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
            model_type = 'multichoice'
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
            model_type = 'vqa_advanced'
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048, 'num_labels': 3129}
            model_type = 'vqa'
        elif "nlvr" in checkpoint_path:
            config_params = {
                'visual_embedding_dim': 1024,
                'num_labels': 2,
            }
            model_type = 'nlvr'

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
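
# Example invocation (script filename and output folder are illustrative; the
# checkpoint must be one of ACCEPTABLE_CHECKPOINTS):
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_fine_tuned.th ./visualbert-vqa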
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 334 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
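# A minimal sketch of how the (src, dest) pairs built above are consumed; `rename_key`
# further down performs the actual pop/insert on the state dict:
#
#   for src, dest in create_rename_keys(config, vqa_model=True):
#       rename_key(state_dict, src, dest)  # i.e. state_dict[dest] = state_dict.pop(src)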
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights into our ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
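# Example invocation (the default URL converts the MLM+ITM checkpoint; the script
# filename and dump folder below are illustrative):
#
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm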
| 37 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    """Configuration class to store the configuration of an NLLB-MoE model."""

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
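# A minimal usage sketch for the config above; `hidden_size` resolves to `d_model`
# through `attribute_map`, so both reads below return the same value:
#
#   config = NllbMoeConfig(num_experts=64, expert_capacity=32)
#   assert config.hidden_size == config.d_model == 1024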
| 334 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
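# With the _LazyModule registration above, the concrete tokenizer modules are only
# imported on first attribute access, e.g. (sketch):
#
#   from transformers.models.herbert import HerbertTokenizer  # triggers the lazy import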
| 284 |
from __future__ import annotations


def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    """
    Return the median of the merged contents of two arrays.

    >>> median_of_two_arrays([1, 3], [2])
    2
    >>> median_of_two_arrays([1, 2], [3, 4])
    2.5
    """
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
| 334 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, asdict(split_dict) must keep the deprecated "dataset_name"
    # field so that old versions of the library can read new split info files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
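# The same round-trip can be exercised outside pytest; a quick sketch:
#
#   sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
#   assert SplitDict._from_yaml_list(sd._to_yaml_list())["train"].num_examples == 42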
| 230 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Transformer-XL model."""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
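# A short usage sketch: with proj_share_all_but_first=True (the default), tie_projs is
# [False] followed by one True per cutoff:
#
#   config = TransfoXLConfig(cutoffs=[20000, 40000, 200000])
#   assert config.tie_projs == [False, True, True, True]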
| 334 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
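# Sketch of a direct call, bypassing the CLI below (the folder name is illustrative):
#
#   convert_weights_and_push(Path("levit-dump-folder"), model_name="levit-128S", push_to_hub=False)
#
# With model_name=None, every entry in names_to_config is converted in turn.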
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 74 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
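# How the mixin-driven tests below typically consume this tester (illustrative sketch;
# `parent_test_case` is a placeholder for the unittest instance):
#
#   tester = BeitModelTester(parent_test_case)
#   config, inputs_dict = tester.prepare_config_and_inputs_for_common()
#   # each ModelTesterMixin test then instantiates a model class with `config`
#   # and feeds it `inputs_dict`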
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _lowerCAmelCase ( self : Union[str, Any] ):
pass
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
SCREAMING_SNAKE_CASE =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case ,nn.Linear ) )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE =['pixel_values']
self.assertListEqual(arg_names[:1] ,snake_case )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case )
def _lowerCAmelCase ( self : Any ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE =False
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.gradient_checkpointing_enable()
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =_config_zero_init(snake_case )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(config=snake_case )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@slow
def _lowerCAmelCase ( self : List[str] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =BeitModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).pixel_values.to(snake_case )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE =torch.ones((1, 196) ,dtype=torch.bool ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(pixel_values=snake_case ,bool_masked_pos=snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(snake_case )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,snake_case ,atol=1e-2 ) )
@slow
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =281
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 21841) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =2396
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] ,device=snake_case ,)
else:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] ,device=snake_case ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,snake_case ,atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case ,target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,snake_case )
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case )
SCREAMING_SNAKE_CASE =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape ,snake_case )
| 334 | 0 |
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
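# Downstream usage sketch: consumers import from this package namespace, and the
# torch/flax guards above decide what is actually exposed, e.g.:
#
#   from diffusers.models import UNet2DConditionModel  # only available when torch is installed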
| 254 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for the RegNet trunk that mimics what vissl does, without needing a config file.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
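# Sketch of the seer-in1k head copy performed with the helper above (the key pairs
# mirror the ones used further down in convert_weight_and_push):
#
#   keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
#   state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)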
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

    print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
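# Direct-call sketch, mirroring the CLI entry point below (the folder name is illustrative):
#
#   convert_weights_and_push(Path("regnet-dump"), model_name="regnet-y-040", push_to_hub=False)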
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 334 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
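# Sketch of how the tester above feeds the actual test class below:
#
#   tester = LayoutLMvaImageProcessingTester(self)
#   image_processor = LayoutLMvaImageProcessor(**tester.prepare_image_processor_dict())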
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
A__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
self.assertTrue(hasattr(__lowerCamelCase , "apply_ocr" ) )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowerCamelCase__ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
lowerCamelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , __lowerCamelCase )
self.assertIsInstance(encoding.boxes , __lowerCamelCase )
# Test batched
lowerCamelCase__ : List[str] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
lowerCamelCase__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase__ : Optional[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase__ : int = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCamelCase__ : Any = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
lowerCamelCase__ : Dict = Image.open(ds[0]["file"] ).convert("RGB" )
lowerCamelCase__ : Any = image_processing(__lowerCamelCase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase__ : int = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowerCamelCase__ : int = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __lowerCamelCase )
self.assertListEqual(encoding.boxes , __lowerCamelCase )
# with apply_OCR = False
lowerCamelCase__ : Any = LayoutLMvaImageProcessor(apply_ocr=__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = image_processing(__lowerCamelCase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
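
# Editor's sketch: LayoutLM-style box normalization onto a 0-1000 grid, which
# is the coordinate convention of the `boxes` fixture above (a common helper,
# reconstructed here purely for illustration).
def normalize_box(box, width, height):
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]


assert normalize_box([50, 50, 100, 100], 200, 200) == [250, 250, 500, 500]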
| 184 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
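# New Code # — editor's illustrative sketch, not part of the original example.
# `find_executable_batch_size` re-runs the decorated function with a halved
# `batch_size` whenever an out-of-memory style error escapes it. The toy
# function below is hypothetical and only simulates an OOM:
@find_executable_batch_size(starting_batch_size=128)
def _demo_inner_loop(batch_size):
    if batch_size > 32:
        # accelerate recognizes this message and retries with batch_size // 2
        raise RuntimeError("CUDA out of memory.")
    return batch_size


# Calling `_demo_inner_loop()` (no argument) would settle on 32: 128 -> 64 -> 32.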
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        # (see the small sketch after this function).
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
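

# Editor's sketch (hypothetical helper, referenced from the comment inside
# `collate_fn` above): the length `pad_to_multiple_of` pads up to — round the
# longest sequence length to the next multiple, keeping tensor shapes friendly
# for Tensor Cores and XLA compilation.
def _demo_padded_length(longest: int, multiple: int = 8) -> int:
    return ((longest + multiple - 1) // multiple) * multiple


# e.g. _demo_padded_length(13) == 16 and _demo_padded_length(16) == 16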
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). '
        'Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 334 | 0 |
'''simple docstring'''
def longest_distance(graph: dict[int, list[int]]) -> None:
    """Print the number of vertices on the longest path in a DAG,
    found with Kahn's topological-sort algorithm."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
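
# Editor's sketch: a memoized-DFS cross-check of the Kahn's-algorithm result
# above (uses the `graph` defined above; the chain 0 -> 2 -> 5 -> 6 -> 7 has
# five vertices, so both approaches print 5).
from functools import lru_cache


@lru_cache(maxsize=None)
def _chain_len(v: int) -> int:
    return 1 + max((_chain_len(u) for u in graph[v]), default=0)


print(max(_chain_len(v) for v in graph))  # 5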
| 53 |
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
| 334 | 0 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(
    resistance: float, reactance: float, impedance: float
) -> dict[str, float]:
    """Solve the impedance right triangle Z**2 = R**2 + X**2 for whichever
    quantity is passed as 0 (exactly one of the three must be 0)."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
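
    # Editor's sketch: solving the 3-4-5 impedance triangle both ways
    # (uses the function as restored above).
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
    print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}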
| 52 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_lowerCamelCase ="sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
"""simple docstring"""
def _lowerCAmelCase ( self : Union[str, Any] ):
super().setUp()
SCREAMING_SNAKE_CASE =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,extract_compressed_file=snake_case ,)
SCREAMING_SNAKE_CASE =f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Optional[int] ):
MarianMTModel.from_pretrained(snake_case )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
SCREAMING_SNAKE_CASE =f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
SCREAMING_SNAKE_CASE =['finetune.py'] + bash_script.split() + args
with patch.object(snake_case ,'argv' ,snake_case ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
SCREAMING_SNAKE_CASE =main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] ,17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =f'{self.test_file_dir_str}/test_data/wmt_en_ro'
SCREAMING_SNAKE_CASE ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16 ' ,' ' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16' ,'' )
SCREAMING_SNAKE_CASE =6
SCREAMING_SNAKE_CASE =(
['distillation.py']
+ bash_script.split()
+ [
f'--output_dir={output_dir}',
'--gpus=1',
'--learning_rate=1e-3',
f'--num_train_epochs={epochs}',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(snake_case ,'argv' ,snake_case ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
SCREAMING_SNAKE_CASE =distill_main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
SCREAMING_SNAKE_CASE ={os.path.basename(snake_case ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
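

# Editor's sketch: the argv-patching pattern the tests above rely on, shown in
# isolation with a hypothetical parser — it lets a CLI entry point run
# in-process instead of in a subprocess.
def _demo_cli() -> int:
    demo_parser = argparse.ArgumentParser()
    demo_parser.add_argument("--epochs", type=int, default=1)
    return demo_parser.parse_args().epochs


with patch.object(sys, "argv", ["prog", "--epochs", "3"]):
    assert _demo_cli() == 3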
| 334 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a: Optional[int] = logging.get_logger(__name__)
__a: Dict = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
__a: Union[str, Any] = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
__a: int = {
"""ctrl""": 2_56,
}
__a: List[str] = {
"""Pregnancy""": 16_86_29,
"""Christianity""": 76_75,
"""Explain""": 10_64_23,
"""Fitness""": 6_34_40,
"""Saving""": 6_31_63,
"""Ask""": 2_71_71,
"""Ass""": 9_59_85,
"""Joke""": 16_35_09,
"""Questions""": 4_56_22,
"""Thoughts""": 4_96_05,
"""Retail""": 5_23_42,
"""Feminism""": 16_43_38,
"""Writing""": 1_19_92,
"""Atheism""": 19_22_63,
"""Netflix""": 4_86_16,
"""Computing""": 3_96_39,
"""Opinion""": 4_32_13,
"""Alone""": 4_49_67,
"""Funny""": 5_89_17,
"""Gaming""": 4_03_58,
"""Human""": 40_88,
"""India""": 13_31,
"""Joker""": 7_71_38,
"""Diet""": 3_62_06,
"""Legal""": 1_18_59,
"""Norman""": 49_39,
"""Tip""": 7_26_89,
"""Weight""": 5_23_43,
"""Movies""": 4_62_73,
"""Running""": 2_34_25,
"""Science""": 20_90,
"""Horror""": 3_77_93,
"""Confession""": 6_05_72,
"""Finance""": 1_22_50,
"""Politics""": 1_63_60,
"""Scary""": 19_19_85,
"""Support""": 1_26_54,
"""Technologies""": 3_25_16,
"""Teenage""": 6_61_60,
"""Event""": 3_27_69,
"""Learned""": 6_74_60,
"""Notion""": 18_27_70,
"""Wikipedia""": 3_75_83,
"""Books""": 66_65,
"""Extract""": 7_60_50,
"""Confessions""": 10_27_01,
"""Conspiracy""": 7_59_32,
"""Links""": 6_36_74,
"""Narcissus""": 15_04_25,
"""Relationship""": 5_47_66,
"""Relationships""": 13_47_96,
"""Reviews""": 4_16_71,
"""News""": 42_56,
"""Translation""": 2_68_20,
"""multilingual""": 12_84_06,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = CONTROL_CODES
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="<unk>" , **__lowerCAmelCase ) -> Tuple:
super().__init__(unk_token=__lowerCAmelCase , **__lowerCAmelCase )
with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase__ : Tuple = json.load(__lowerCAmelCase )
lowercase__ : Dict = {v: k for k, v in self.encoder.items()}
with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowercase__ : Union[str, Any] = merges_handle.read().split('''\n''' )[1:-1]
lowercase__ : Union[str, Any] = [tuple(merge.split() ) for merge in merges]
lowercase__ : Optional[int] = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowercase__ : Dict = {}
@property
def _lowerCAmelCase( self ) -> Optional[Any]:
return len(self.encoder )
def _lowerCAmelCase( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Any:
if token in self.cache:
return self.cache[token]
lowercase__ : Optional[int] = tuple(__lowerCAmelCase )
lowercase__ : str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowercase__ : Union[str, Any] = get_pairs(__lowerCAmelCase )
if not pairs:
return token
while True:
lowercase__ : List[Any] = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ , lowercase__ : Optional[int] = bigram
lowercase__ : List[str] = []
lowercase__ : Any = 0
while i < len(__lowerCAmelCase ):
try:
lowercase__ : Optional[int] = word.index(__lowerCAmelCase , __lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__ : Any = j
if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ : Union[str, Any] = tuple(__lowerCAmelCase )
lowercase__ : List[str] = new_word
if len(__lowerCAmelCase ) == 1:
break
else:
lowercase__ : Any = get_pairs(__lowerCAmelCase )
lowercase__ : Dict = '''@@ '''.join(__lowerCAmelCase )
lowercase__ : List[str] = word[:-4]
lowercase__ : Union[str, Any] = word
return word
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Union[str, Any]:
lowercase__ : List[Any] = []
lowercase__ : List[Any] = re.findall(r'''\S+\n?''' , __lowerCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) )
return split_tokens
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Any:
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> str:
return self.decoder.get(__lowerCAmelCase , self.unk_token )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Union[str, Any]:
lowercase__ : Optional[Any] = ''' '''.join(__lowerCAmelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> Tuple:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : Union[str, Any] = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ : List[str] = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' )
lowercase__ : int = 0
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowercase__ : Any = token_index
writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
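

# Editor's sketch: one round of the BPE loop above on a toy example
# (hypothetical merge ranks, just to show which pair gets merged first).
_toy_ranks = {("h", "e"): 0, ("l", "l"): 1, ("he", "ll"): 2}
_word = ("h", "e", "l", "l", "o</w>")
_pairs = {(_word[i], _word[i + 1]) for i in range(len(_word) - 1)}
print(min(_pairs, key=lambda pair: _toy_ranks.get(pair, float("inf"))))  # ('h', 'e')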
| 198 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig(PretrainedConfig):
"""simple docstring"""
model_type = 'blip_2_vision_model'
    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')

        return cls.from_dict(config_dict, **kwargs)
class BlipaQFormerConfig(PretrainedConfig):
"""simple docstring"""
model_type = 'blip_2_qformer'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['qformer_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')

        return cls.from_dict(config_dict, **kwargs)
class BlipaConfig(PretrainedConfig):
"""simple docstring"""
model_type = 'blip-2'
is_composition = True
    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')

        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')

        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs(cls, vision_config: BlipaVisionConfig, qformer_config: BlipaQFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs,
        )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
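

if __name__ == "__main__":
    # Editor's sketch: composing the three sub-configurations with library
    # defaults (illustrative values only, not a released checkpoint). OPTConfig
    # is pulled from the installed transformers package here for simplicity;
    # the relative imports at the top of this module still assume it lives
    # inside the transformers source tree.
    from transformers import OPTConfig

    blip2_config = BlipaConfig.from_vision_qformer_text_configs(
        vision_config=BlipaVisionConfig(),
        qformer_config=BlipaQFormerConfig(),
        text_config=OPTConfig(),
    )
    print(blip2_config.num_query_tokens)  # 32 by default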
| 334 | 0 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Return the Jaccard similarity |A ∩ B| / |A ∪ B| of two collections
    (sets, lists, or tuples). With ``alternative_union=True`` the denominator
    is ``len(set_a) + len(set_b)`` instead of the true union size."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
    print(jaccard_similarity(set_a, set_b))  # 3 / 8 = 0.375
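    # Editor's sketch: the "alternative union" variant from the signature above
    # divides by len(set_a) + len(set_b) = 11 instead of the true union size.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11 ≈ 0.2727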
| 29 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase ="\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase ="\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase ="\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int] ,snake_case : str ,snake_case : List[str]=None ,snake_case : str=None ,snake_case : int=None ,snake_case : Union[str, Any]=None ,snake_case : Optional[int]="auto" ,snake_case : List[str]=-1 ,snake_case : Union[str, Any]=0.9 ,snake_case : Tuple=5 ,snake_case : Union[str, Any]=500 ,snake_case : Union[str, Any]="gpt2-large" ,snake_case : Union[str, Any]=-1 ,snake_case : Optional[Any]=1024 ,snake_case : Optional[Any]=25 ,snake_case : List[str]=5 ,snake_case : List[str]=True ,snake_case : Optional[Any]=25 ,):
SCREAMING_SNAKE_CASE =compute_mauve(
p_text=snake_case ,q_text=snake_case ,p_features=snake_case ,q_features=snake_case ,p_tokens=snake_case ,q_tokens=snake_case ,num_buckets=snake_case ,pca_max_data=snake_case ,kmeans_explained_var=snake_case ,kmeans_num_redo=snake_case ,kmeans_max_iter=snake_case ,featurize_model_name=snake_case ,device_id=snake_case ,max_text_length=snake_case ,divergence_curve_discretization_size=snake_case ,mauve_scaling_factor=snake_case ,verbose=snake_case ,seed=snake_case ,)
return out
| 334 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 111 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
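

# Editor's sketch: with the defaults above (224-px images, 16-px patches,
# mask_ratio 0.75), the MAE encoder only sees a quarter of the patch tokens.
_num_patches = (224 // 16) ** 2            # 196 patches
_visible = int(_num_patches * (1 - 0.75))  # 49 patches kept for the encoder
print(_num_patches, _visible)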
| 334 | 0 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 37 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
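
# Editor's sketch: the lazy-import idea in miniature (hypothetical and
# standalone; the real `_LazyModule` keeps a per-attribute map like
# `_import_structure` and only imports a submodule on first attribute access).
import importlib
import types


class _LazyDemo(types.ModuleType):
    def __init__(self, name, targets):
        super().__init__(name)
        self._targets = targets  # attribute name -> fully qualified module

    def __getattr__(self, item):
        module = importlib.import_module(self._targets[item])
        return getattr(module, item)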
| 334 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any]=1_3 , lowerCAmelCase_ : int=7 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Optional[int]=9_9 , lowerCAmelCase_ : Tuple=1_6 , lowerCAmelCase_ : Dict=3_6 , lowerCAmelCase_ : str=6 , lowerCAmelCase_ : Dict=6 , lowerCAmelCase_ : List[str]=6 , lowerCAmelCase_ : Union[str, Any]=3_7 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Optional[int]=5_1_2 , lowerCAmelCase_ : int=1_6 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : Dict=None , ) -> List[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = embedding_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_hidden_groups
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
def lowercase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : Optional[int] ) -> str:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):  # assumed mapping; its import is not visible in this excerpt
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['sentence_order_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head_absolute_embedding( self ):
        model = AlbertModel.from_pretrained('albert-base-v2' )
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
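# --- Added example (not part of the original test file) ---
# A minimal, hedged sketch of the pattern the tester above automates: build a
# tiny AlbertConfig, run a forward pass, and assert the output shape. The sizes
# mirror the tester defaults; the helper name is illustrative.
def _demo_albert_forward_pass():
    tiny_config = AlbertConfig(
        vocab_size=99, embedding_size=16, hidden_size=36,
        num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6,
        intermediate_size=37, max_position_embeddings=512,
    )
    tiny_model = AlbertModel(tiny_config).eval()
    demo_input_ids = torch.randint(0, tiny_config.vocab_size, (13, 7))  # (batch_size, seq_length)
    with torch.no_grad():
        out = tiny_model(demo_input_ids)
    assert out.last_hidden_state.shape == (13, 7, tiny_config.hidden_size)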
| 284 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
    def test_no_type( self ):
        arr = pa.array(TypedSequence([1, 2, 3] ) )
        self.assertEqual(arr.type ,pa.int64() )
    def test_array_type_forbidden( self ):
        with self.assertRaises(ValueError ):
            arr = pa.array(TypedSequence([1, 2, 3] ) ,type=pa.int64() )
    def test_try_type_and_type_forbidden( self ):
        with self.assertRaises(ValueError ):
            arr = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
    def test_compatible_type( self ):
        arr = pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
        self.assertEqual(arr.type ,pa.int32() )
    def test_incompatible_type( self ):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            arr = pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
    def test_try_compatible_type( self ):
        arr = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
        self.assertEqual(arr.type ,pa.int32() )
    def test_try_incompatible_type( self ):
        arr = pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
        self.assertEqual(arr.type ,pa.string() )
    def test_compatible_extension_type( self ):
        arr = pa.array(TypedSequence([[[1, 2, 3]]] ,type=Array2D((1, 3) ,'int64' ) ) )
        self.assertEqual(arr.type ,Array2DExtensionType((1, 3) ,'int64' ) )
    def test_incompatible_extension_type( self ):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            arr = pa.array(TypedSequence(['foo', 'bar'] ,type=Array2D((1, 3) ,'int64' ) ) )
    def test_try_compatible_extension_type( self ):
        arr = pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=Array2D((1, 3) ,'int64' ) ) )
        self.assertEqual(arr.type ,Array2DExtensionType((1, 3) ,'int64' ) )
    def test_try_incompatible_extension_type( self ):
        arr = pa.array(TypedSequence(['foo', 'bar'] ,try_type=Array2D((1, 3) ,'int64' ) ) )
        self.assertEqual(arr.type ,pa.string() )
    @require_pil
    def test_exhaustive_cast( self ):
        import PIL.Image
        pil_image = PIL.Image.fromarray(np.arange(10 ,dtype=np.uint8 ).reshape(2 ,5 ) )
        with patch(
            'datasets.arrow_writer.cast_to_python_objects' ,side_effect=cast_to_python_objects ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{'path': None, 'bytes': b'image_bytes'}, pil_image] ,type=Image() ) )
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn('optimize_list_casting' ,kwargs )
            self.assertFalse(kwargs['optimize_list_casting'] )
def _check_output(output, expected_num_chunks ):
    """simple docstring"""
    stream = pa.BufferReader(output ) if isinstance(output, pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}] )
def test_write(fields, writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size ) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1} )
        writer.write({'col_1': 'bar', 'col_2': 2} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata )
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features():
    """simple docstring"""
    output = pa.BufferOutputStream()
    features = Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
    with ArrowWriter(stream=output, features=features ) as writer:
        writer.write({'labels': 0} )
        writer.write({'labels': 1} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue() )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
def test_key_datatype(writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt='split_name', check_duplicates=True, ) as writer:
        with pytest.raises(InvalidKeyError ):
            writer.write({'col_1': 'foo', 'col_2': 1}, key=[1, 2] )
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def test_duplicate_keys(writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt='split_name', check_duplicates=True, ) as writer:
        with pytest.raises(DuplicatedKeysError ):
            writer.write({'col_1': 'foo', 'col_2': 1}, key=10 )
            writer.write({'col_1': 'bar', 'col_2': 2}, key=10 )
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def test_write_with_keys(writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt='split_name', check_duplicates=True, ) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1}, key=1 )
        writer.write({'col_1': 'bar', 'col_2': 2}, key=2 )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}] )
def test_write_batch(fields, writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size ) as writer:
        writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
        writer.write_batch({'col_1': [], 'col_2': []} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata )
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}] )
def test_write_table(fields, writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size ) as writer:
        writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata )
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}] )
def test_write_row(fields, writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size ) as writer:
        writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
        writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata )
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
        output = os.path.join(tmp_dir, 'test.arrow' )
        with ArrowWriter(path=output, schema=pa.schema(fields ) ) as writer:
            writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata )
        _check_output(output, 1 )
def get_base_dtype(arr_type ):
    """simple docstring"""
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value ):
    """simple docstring"""
    if isinstance(lst[0], list ):
        change_first_primitive_element_in_list(lst[0], value )
    else:
        lst[0] = value
@pytest.mark.parametrize('optimized_int_type, expected_dtype', [(None, pa.int64()), (Value('int32' ), pa.int32())] )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype ):
    """simple docstring"""
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type ) )
    assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    'col, expected_dtype', [
        ('attention_mask', pa.int8()),
        ('special_tokens_mask', pa.int8()),
        ('token_type_ids', pa.int8()),
        ('input_ids', pa.int32()),
        ('other', pa.int64()),
    ], )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_typed_sequence(sequence, col, expected_dtype ):
    """simple docstring"""
    arr = pa.array(OptimizedTypedSequence(sequence, col=col ) )
    assert get_base_dtype(arr.type ) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence )
        value = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
        change_first_primitive_element_in_list(sequence, value )
        arr = pa.array(OptimizedTypedSequence(sequence, col=col ) )
        assert get_base_dtype(arr.type ) == pa.int64()
@pytest.mark.parametrize('raise_exception', [False, True] )
def test_arrow_writer_closes_stream(raise_exception, tmp_path ):
    """simple docstring"""
    path = str(tmp_path / 'dataset-train.arrow' )
    try:
        with ArrowWriter(path=path ) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs ):
    """simple docstring"""
    path = 'mock://dataset-train.arrow'
    with ArrowWriter(path=path, storage_options=mockfs.storage_options ) as writer:
        assert isinstance(writer._fs, type(mockfs ) )
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({'col_1': 'foo', 'col_2': 1} )
        writer.write({'col_1': 'bar', 'col_2': 2} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path )
def test_parquet_writer_write():
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output ) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1} )
        writer.write({'col_1': 'bar', 'col_2': 2} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files', [False, True] )
def test_writer_embed_local_files(tmp_path, embed_local_files ):
    """simple docstring"""
    import PIL.Image
    image_path = str(tmp_path / 'test_image_rgb.jpg' )
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8 ) ).save(image_path, format='png' )
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({'image': Image()} ), embed_local_files=embed_local_files ) as writer:
        writer.write({'image': image_path} )
        writer.finalize()
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out['image'][0]['path'], str )
        with open(image_path, 'rb' ) as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    """simple docstring"""
    schema = pa.schema([pa.field('col_1', pa.string(), nullable=False )] )
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output ) as writer:
        writer._build_writer(inferred_schema=schema )
    assert writer._schema == pa.schema([pa.field('col_1', pa.string() )] )
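# --- Added example (not part of the original test file) ---
# The round trip the tests above exercise, collected in one hedged sketch:
# write two examples through ArrowWriter, finalize, and read the IPC stream
# back with pyarrow. The function name is illustrative.
def _demo_arrow_writer_round_trip():
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1})
        writer.write({'col_1': 'bar', 'col_2': 2})
        num_examples, num_bytes = writer.finalize()
    table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
    assert num_examples == 2 and num_bytes > 0
    assert table.to_pydict() == {'col_1': ['foo', 'bar'], 'col_2': [1, 2]}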
| 334 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []
    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode('''UTF-8''')
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(''' '''):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = '''\n'''.join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f""": {x}: """ in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith('''.zip''') or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        """simple docstring"""
        return values.split(''',''')
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
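# --- Added example (not part of the original script) ---
# A simplified, self-contained sketch of the buffering rule parse_line applies:
# indented lines extend the current warning, a non-indented line closes it, and
# a warning is kept only if it names a target category. This is an illustration,
# not a line-for-line copy of the logic above.
def _demo_collect_warnings(lines, targets=("DeprecationWarning",)):
    selected, buffer = set(), []
    for line in lines:
        if not line.startswith(" "):
            warning = "\n".join(buffer)
            if buffer and any(f": {x}: " in warning for x in targets):
                selected.add(warning)
            buffer.clear()
        buffer.append(line.strip())
    return selected

assert _demo_collect_warnings(
    ["src/foo.py:10: DeprecationWarning: old API", "    use new_api() instead", "1 passed"]
)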
| 230 |
def or_gate(input_a, input_b ):
    """simple docstring"""
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate():
    """simple docstring"""
    assert or_gate(0, 0 ) == 0
    assert or_gate(0, 1 ) == 1
    assert or_gate(1, 0 ) == 1
    assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
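# --- Added example (not part of the original module) ---
# The same tuple-counting trick generalizes to an n-input OR gate; this helper
# is an illustration, not something the original file defines.
def or_gate_n(*inputs: int) -> int:
    return int(inputs.count(1) != 0)

assert or_gate_n(0, 0, 0) == 0
assert or_gate_n(0, 1, 0) == 1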
| 334 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_vivit'''] = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vivit'''] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 74 |
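# --- Added example (not part of the original module) ---
# Hedged sketch of what the lazy-module pattern above buys: importing the
# package is cheap, and the heavy submodule import is deferred until an
# attribute is first accessed. The helper name is illustrative.
def _demo_lazy_import():
    import transformers.models.vivit as vivit  # cheap: only the lazy shim loads
    return vivit.VivitImageProcessor  # the real submodule import happens here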
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"vocab_file": "vocab.txt"}
_lowerCamelCase ={
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_lowerCamelCase ={
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def load_vocab_file( lowerCAmelCase_ ):
"""simple docstring"""
with open(lowerCAmelCase_, 'r' ) as f:
SCREAMING_SNAKE_CASE =f.read().splitlines()
return [l.strip() for l in lines]
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ['input_ids', 'attention_mask']
def __init__( self : int ,snake_case : Dict ,snake_case : Dict="<unk>" ,snake_case : Optional[int]="<cls>" ,snake_case : Optional[int]="<pad>" ,snake_case : int="<mask>" ,snake_case : Optional[int]="<eos>" ,**snake_case : List[str] ,):
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE =load_vocab_file(snake_case )
SCREAMING_SNAKE_CASE =dict(enumerate(self.all_tokens ) )
SCREAMING_SNAKE_CASE ={tok: ind for ind, tok in enumerate(self.all_tokens )}
SCREAMING_SNAKE_CASE =unk_token
SCREAMING_SNAKE_CASE =cls_token
SCREAMING_SNAKE_CASE =pad_token
SCREAMING_SNAKE_CASE =mask_token
SCREAMING_SNAKE_CASE =eos_token
SCREAMING_SNAKE_CASE =self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : int ):
return self._id_to_token.get(snake_case ,self.unk_token )
def _lowerCAmelCase ( self : Dict ,snake_case : str ):
return self._token_to_id.get(snake_case ,self._token_to_id.get(self.unk_token ) )
def _lowerCAmelCase ( self : Tuple ,snake_case : List[str] ,**snake_case : Any ):
return text.split()
def _lowerCAmelCase ( self : Optional[int] ,snake_case : str=False ):
return len(self._id_to_token )
def _lowerCAmelCase ( self : List[str] ):
return {token: i for i, token in enumerate(self.all_tokens )}
def _lowerCAmelCase ( self : List[Any] ,snake_case : str ):
return self._token_to_id.get(snake_case ,self._token_to_id.get(self.unk_token ) )
def _lowerCAmelCase ( self : Any ,snake_case : int ):
return self._id_to_token.get(snake_case ,self.unk_token )
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1 = None ):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id] # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
        return cls + token_ids_0 + sep + token_ids_1 + sep # Multiple inputs always have an EOS token
    def get_special_tokens_mask( self ,token_ids_0 ,token_ids_1 = None ,already_has_special_tokens = False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask
    def save_vocabulary( self ,save_directory ,filename_prefix ):
        vocab_file = os.path.join(save_directory ,(filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
        with open(vocab_file ,'w' ) as f:
            f.write('\n'.join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def vocab_size( self ):
        return self.get_vocab_size(with_added_tokens=False )
    def _add_tokens( self ,new_tokens ,special_tokens = False ):
        return super()._add_tokens(new_tokens ,special_tokens=special_tokens )
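# --- Added example (not part of the original module) ---
# Hedged usage sketch for the tokenizer above through the public transformers
# API (needs network access; the checkpoint name comes from the pretrained map
# at the top of the file).
def _demo_esm_tokenizer():
    from transformers import EsmTokenizer

    esm_tok = EsmTokenizer.from_pretrained('facebook/esm2_t6_8M_UR50D')
    ids = esm_tok('MKTAYIAKQR')['input_ids']  # <cls> + one id per residue + <eos>
    print(esm_tok.convert_ids_to_tokens(ids))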
| 334 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_convnext'''] = ['''ConvNextFeatureExtractor''']
    _import_structure['''image_processing_convnext'''] = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_convnext'''] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_convnext'''] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
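# --- Added example (not part of the original module) ---
# Hedged sketch of the optional-dependency probe used above, reduced to its
# core: check whether a backend is importable before exposing classes for it.
# Names here are illustrative.
import importlib.util

def _backend_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None

if _backend_available('torch'):
    pass  # torch-backed classes would be registered here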
| 254 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
    def _forward_hook( self ,m ,inputs : Tensor ,outputs : Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m ,nn.Conv2d ) or isinstance(m ,nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__( self ,x : Tensor ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 0
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
    def __call__( self ,x : Tensor ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda x : type(x ) not in self.src_skip ,src_traced ) )
        dest_traced = list(filter(lambda x : type(x ) not in self.dest_skip ,dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                f' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced ,src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}' )
def convert_weight_and_push(name, config, save_directory, push_to_hub = True ):
    """simple docstring"""
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ), our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push(save_directory, model_name = None, push_to_hub = True ):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    num_labels = num_labels
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    id2label = id2label
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id )
    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
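# --- Added example (not part of the original script) ---
# The core of ModuleTransfer in miniature: copy state dicts between two
# architecturally-aligned modules, layer by layer (illustrative only; reuses
# the torch.nn import from this file).
def _demo_module_transfer():
    src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    dst = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    for s, d in zip(src, dst):
        d.load_state_dict(s.state_dict())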
| 334 | 0 |
import random
def partition(a, left_index, right_index ):
    """simple docstring"""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1
def quick_sort_random(a, left, right ):
    """simple docstring"""
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        ) # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index ) # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right ) # recursive quicksort to the right of the pivot point
def main():
    """simple docstring"""
    user_input = input("Enter numbers separated by a comma:\n" ).strip()
    arr = [int(item ) for item in user_input.split("," )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
    main()
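# --- Added example (not part of the original module) ---
# Quick property check for quick_sort_random, using Python's sorted() as the
# oracle (illustrative only; `random` is already imported above).
def _demo_quick_sort_random():
    data = [random.randint(-100, 100) for _ in range(50)]
    expected = sorted(data)
    quick_sort_random(data, 0, len(data))
    assert data == expected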
| 184 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7 ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = f'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url, headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token ):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token ):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        # keyword spelled `worflow_run_id` as in get_ci_error_statistics
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token )
def get_last_daily_ci_reports(artifact_names, output_dir, token ):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f'{artifact_name}.zip' )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8' )
    return results
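# --- Added example (not part of the original module) ---
# Hedged usage sketch: pull the reports of the latest completed scheduled run.
# The artifact name and token below are placeholders, not values from the repo.
if __name__ == "__main__":
    reports = get_last_daily_ci_reports(
        artifact_names=["ci_results"],  # illustrative artifact name
        output_dir="previous_ci",
        token="<a GitHub token with actions:read scope>",
    )
    print(sorted(reports))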
| 334 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class snake_case ( lowerCamelCase_ ):
"""simple docstring"""
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname , 'realm_tokenizer' )
        os.makedirs(realm_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(realm_tokenizer_path , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        realm_block_records_path = os.path.join(self.tmpdirname , 'realm_block_records' )
        os.makedirs(realm_block_records_path , exist_ok=True )
    def get_tokenizer( self ) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def get_config( self ):
        config = RealmConfig(num_block_records=self.num_block_records )
        return config
    def get_dummy_dataset( self ):
        dataset = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'question': ['foo', 'bar'],
                'answers': [['Foo', 'Bar'], ['Bar']],
            } )
        return dataset
    def get_dummy_block_records( self ):
        block_records = np.array(
            [
                b'This is the first record',
                b'This is the second record',
                b'This is the third record',
                b'This is the fourth record',
                b'This is the fifth record',
                b'This is a longer longer longer record',
            ] , dtype=object , )
        return block_records
    def get_dummy_retriever( self ):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever
    def test_retrieve( self ):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3] , dtype='long' )
        question_input_ids = tokenizer(['Test question'] ).input_ids
        answer_ids = tokenizer(
            ['the fourth'] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='np' )
        self.assertEqual(len(has_answers ) , 2 )
        self.assertEqual(len(start_pos ) , 2 )
        self.assertEqual(len(end_pos ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
    def test_block_has_answer( self ):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5] , dtype='long' )
        question_input_ids = tokenizer(['Test question'] ).input_ids
        answer_ids = tokenizer(
            ['the fourth', 'longer longer'] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='np' )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )
def _lowerCamelCase ( self : Optional[int] ):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
        self.assertEqual(retriever.block_records[0] , b'This is the first record' )
        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
        self.assertEqual(retriever.block_records[0] , b'This is the first record' )
| 53 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
    def __init__( self : Tuple ,parent : Optional[int] ,batch_size : Dict=13 ,seq_length : str=7 ,is_training : Dict=True ,use_input_mask : List[Any]=True ,use_token_type_ids : Dict=False ,use_labels : int=True ,vocab_size : Dict=99 ,hidden_size : int=32 ,num_hidden_layers : List[str]=5 ,num_attention_heads : Optional[Any]=4 ,intermediate_size : Tuple=64 ,hidden_act : List[Any]="gelu" ,hidden_dropout_prob : str=0.1 ,attention_probs_dropout_prob : str=0.1 ,max_position_embeddings : List[str]=512 ,type_vocab_size : List[str]=16 ,type_sequence_label_size : str=2 ,initializer_range : Dict=0.02 ,num_labels : Optional[int]=3 ,num_choices : int=4 ,scope : Any=None ,q_groups : Union[str, Any]=2 ,k_groups : List[Any]=2 ,v_groups : Optional[int]=2 ,post_attention_groups : Dict=2 ,intermediate_groups : List[str]=4 ,output_groups : int=1 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return SqueezeBertConfig(
embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Optional[Any] ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : str ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : List[Any] ,snake_case : int ,snake_case : Any ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =SqueezeBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
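# The test harness below runs the shared model-test suite and the pipeline
# smoke tests against every SqueezeBert head.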
@require_torch
class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_squeezebert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_squeezebert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_squeezebert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_squeezebert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_squeezebert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
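# Integration test: run the MNLI-finetuned checkpoint on a fixed input and
# compare the classification logits against reference values.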
@require_sentencepiece
@require_tokenizers
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 334 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
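# Maps each submodule to the public names it provides; nothing below is
# imported until one of these names is first accessed.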
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
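# After this swap, attribute access on the module resolves lazily: the first
# use of e.g. `XGLMConfig` triggers the real import of `configuration_xglm`.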
| 52 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
@require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
@require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
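    # Shared padding checks; `numpify` switches the inputs between Python lists
    # and numpy arrays.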
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(all(len(snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
SCREAMING_SNAKE_CASE =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
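    # Truncation counterpart of _check_padding above, with the same local helpers.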
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})
# truncate to smallest
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to smallest with np
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to middle
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' ,truncation=snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =12
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
SCREAMING_SNAKE_CASE =len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
SCREAMING_SNAKE_CASE =((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
@require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        # `np.floataa` is not a numpy dtype; `np.float32` restored here.
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
| 334 | 0 |
'''simple docstring'''
def solution(n: int = 10) -> str:
    """Return the last `n` digits of the number 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(10) = }')
| 198 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowerCamelCase =2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase =50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase =0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting each gene that is already in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
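# e.g. evaluate("abcdx", "abcde") -> ("abcdx", 4.0): four genes already match.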
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    # The original dropped the indexed assignment target; restored here.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
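# Fitter parents spawn more children: the child count scales with the parent's
# normalized score, capped at 10 crossovers (20 mutated strings) per call.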
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the whole evolution cycle until the target string is reached."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
# This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 334 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
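# Public X-MOD checkpoints; the names encode language count and training steps
# (e.g. xmod-base-30-195k = 30 languages, 195k updates).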
class lowerCamelCase (lowerCamelCase_ ):
'''simple docstring'''
_snake_case : Optional[int] = '''xmod'''
def __init__( self , _UpperCamelCase=3_0_5_2_2 , _UpperCamelCase=7_6_8 , _UpperCamelCase=1_2 , _UpperCamelCase=1_2 , _UpperCamelCase=3_0_7_2 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-12 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , _UpperCamelCase="absolute" , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=2 , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=("en_XX",) , _UpperCamelCase=None , **_UpperCamelCase , ) -> int:
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : Optional[int] = num_attention_heads
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase_ : Dict = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Tuple = layer_norm_eps
UpperCAmelCase_ : Tuple = position_embedding_type
UpperCAmelCase_ : Dict = use_cache
UpperCAmelCase_ : Dict = classifier_dropout
UpperCAmelCase_ : Any = pre_norm
UpperCAmelCase_ : List[Any] = adapter_reduction_factor
UpperCAmelCase_ : List[str] = adapter_layer_norm
UpperCAmelCase_ : Tuple = adapter_reuse_layer_norm
UpperCAmelCase_ : Optional[Any] = ln_before_adapter
UpperCAmelCase_ : List[str] = list(_UpperCamelCase )
UpperCAmelCase_ : Any = default_language
class lowerCamelCase (lowerCamelCase_ ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ) -> Dict:
if self.task == "multiple-choice":
UpperCAmelCase_ : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase_ : Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 29 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE =tempfile.mktemp()
with open(snake_case ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,snake_case )
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained(snake_case )
finally:
os.remove(snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,snake_case )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _lowerCAmelCase ( self : int ):
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _lowerCAmelCase ( cls : List[Any] ):
SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _lowerCAmelCase ( cls : Tuple ):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _lowerCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case ,repo_id='test-tokenizer' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def _lowerCAmelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def _lowerCAmelCase ( self : str ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =CustomTokenizer(snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained(snake_case )
bert_tokenizer.save_pretrained(snake_case )
SCREAMING_SNAKE_CASE =CustomTokenizerFast.from_pretrained(snake_case )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=snake_case ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] )
def _lowerCAmelCase ( self : Optional[Any] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
SCREAMING_SNAKE_CASE =Trie()
SCREAMING_SNAKE_CASE =trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(snake_case ,['AB', 'C'] )
| 334 | 0 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a doubly linked list of fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
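# Usage sketch for the reconstructed queue (identifier names recovered from the
# in-file references):
#     queue = CircularQueueLinkedList(3)
#     queue.enqueue("a")
#     queue.dequeue()  # -> "a"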
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase =[
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
_lowerCamelCase =[
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
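# Only the original VisualBERT research checkpoints listed above are accepted
# by the converter below.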
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
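# Entry point: pick the config and model class from the checkpoint name, remap
# the weights, then save the model in Hugging Face format.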
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights to our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
_lowerCamelCase =parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 334 | 0 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Config attribute names below are reconstructed from the original TAPAS
    # conversion script; the values match the anonymised source.
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"""Task {task} not supported.""")

    print(f"""Building PyTorch model from configuration: {config}""")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("""Used relative position embeddings:""", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 37 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'nllb-moe'
__UpperCAmelCase = ['past_key_values']
__UpperCAmelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : str ,snake_case : Optional[int]=128112 ,snake_case : Any=1024 ,snake_case : List[str]=12 ,snake_case : Optional[int]=4096 ,snake_case : List[str]=16 ,snake_case : Optional[Any]=12 ,snake_case : Optional[Any]=4096 ,snake_case : List[Any]=16 ,snake_case : Optional[Any]=0.05 ,snake_case : str=0.05 ,snake_case : Optional[int]=True ,snake_case : Tuple=True ,snake_case : Optional[Any]="relu" ,snake_case : Any=1024 ,snake_case : List[Any]=0.1 ,snake_case : List[Any]=0.1 ,snake_case : Optional[Any]=0.0 ,snake_case : List[Any]=0.02 ,snake_case : Any=2 ,snake_case : Dict=True ,snake_case : Tuple=False ,snake_case : Any="float32" ,snake_case : Tuple=False ,snake_case : List[Any]=128 ,snake_case : Tuple=64 ,snake_case : List[Any]=4 ,snake_case : List[Any]=4 ,snake_case : List[Any]=0.001 ,snake_case : int=0.001 ,snake_case : Tuple="all" ,snake_case : Union[str, Any]=False ,snake_case : Union[str, Any]=False ,snake_case : Optional[int]=1.0 ,snake_case : Optional[Any]=0.2 ,snake_case : Optional[int]=1 ,snake_case : Union[str, Any]=0 ,snake_case : Tuple=2 ,snake_case : List[Any]=False ,**snake_case : List[Any] ,):
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =d_model
SCREAMING_SNAKE_CASE =encoder_ffn_dim
SCREAMING_SNAKE_CASE =encoder_layers
SCREAMING_SNAKE_CASE =encoder_attention_heads
SCREAMING_SNAKE_CASE =decoder_ffn_dim
SCREAMING_SNAKE_CASE =decoder_layers
SCREAMING_SNAKE_CASE =decoder_attention_heads
SCREAMING_SNAKE_CASE =dropout
SCREAMING_SNAKE_CASE =attention_dropout
SCREAMING_SNAKE_CASE =activation_dropout
SCREAMING_SNAKE_CASE =activation_function
SCREAMING_SNAKE_CASE =init_std
SCREAMING_SNAKE_CASE =encoder_layerdrop
SCREAMING_SNAKE_CASE =decoder_layerdrop
SCREAMING_SNAKE_CASE =use_cache
SCREAMING_SNAKE_CASE =encoder_layers
SCREAMING_SNAKE_CASE =scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE =router_z_loss_coef
SCREAMING_SNAKE_CASE =router_aux_loss_coef
SCREAMING_SNAKE_CASE =decoder_sparse_step
SCREAMING_SNAKE_CASE =encoder_sparse_step
SCREAMING_SNAKE_CASE =num_experts
SCREAMING_SNAKE_CASE =expert_capacity
SCREAMING_SNAKE_CASE =router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
SCREAMING_SNAKE_CASE =router_dtype
SCREAMING_SNAKE_CASE =router_ignore_padding_tokens
SCREAMING_SNAKE_CASE =batch_prioritized_routing
SCREAMING_SNAKE_CASE =second_expert_policy
SCREAMING_SNAKE_CASE =normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE =moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE =moe_token_dropout
SCREAMING_SNAKE_CASE =output_router_logits
super().__init__(
pad_token_id=snake_case ,bos_token_id=snake_case ,eos_token_id=snake_case ,is_encoder_decoder=snake_case ,decoder_start_token_id=snake_case ,**snake_case ,)
| 334 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/nllb-200-distilled-600M': (
            'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
_snake_case : List[str] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
FAIRSEQ_LANGUAGE_CODES = _snake_case  # give the language-code list above the name the tokenizer expects
class _UpperCAmelCase ( PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ):
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang( self ) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang( self , new_src_lang: str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors: str , src_lang: Optional[str] , tgt_lang: Optional[str] , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token: str ) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index: int ) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def prepare_seq2seq_batch( self , src_texts: List[str] , src_lang: str = "eng_Latn" , tgt_texts: Optional[List[str]] = None , tgt_lang: str = "fra_Latn" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )

    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens( self , lang: str ) -> None:
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
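# Usage sketch for the tokenizer above (hedged: it needs a real NLLB sentencepiece
# model on disk, so the lines stay commented; the file path is illustrative only):
# tokenizer = _UpperCAmelCase('sentencepiece.bpe.model' ,src_lang='eng_Latn' )
# ids = tokenizer('Hello world' ).input_ids  # starts with the eng_Latn code, ends with </s>
# tokenizer.src_lang = 'fra_Latn'            # the setter re-derives prefix/suffix tokens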
| 284 |
from __future__ import annotations
def median_of_two_arrays(numsa, numsb):
    """simple docstring"""
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
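# Quick worked check of the merge-then-index logic above (a sketch, separate from the
# interactive CLI below): an odd merged length indexes the middle element, an even
# length averages the two middle elements.
assert median_of_two_arrays([1, 3], [2]) == 2        # merged [1, 2, 3] -> index 1
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5   # merged [1, 2, 3, 4] -> (2 + 3) / 2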
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f'The median of two arrays is: {median_of_two_arrays(array_a, array_b)}')
| 334 | 0 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default) -> int:
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False) -> bool:
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no") -> str:
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return value
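# Demonstration of the helpers above (a sketch; the environment variable names are invented):
os.environ["DEMO_DEBUG"] = "1"
assert parse_flag_from_env("DEMO_DEBUG") is True
assert get_int_from_env(["DEMO_UNSET_A", "DEMO_UNSET_B"], default=7) == 7
assert parse_choice_from_env("DEMO_MODE", default="maybe") == "maybe"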
| 230 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_lowerCamelCase ={
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a_ ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self ,vocab_size=267735 ,cutoffs=[20000, 40000, 200000] ,d_model=1024 ,d_embed=1024 ,n_head=16 ,d_head=64 ,d_inner=4096 ,div_val=4 ,pre_lnorm=False ,n_layer=18 ,mem_len=1600 ,clamp_len=1000 ,same_length=True ,proj_share_all_but_first=True ,attn_type=0 ,sample_softmax=-1 ,adaptive=True ,dropout=0.1 ,dropatt=0.0 ,untie_r=True ,init="normal" ,init_range=0.01 ,proj_init_std=0.01 ,init_std=0.02 ,layer_norm_epsilon=1e-5 ,eos_token_id=0 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id ,**kwargs )
    @property
    def max_position_embeddings( self ):
        # Message copied from Transformer-XL documentation
        logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings( self ,value ):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
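# Usage sketch for the repaired config above (hedged; the mangled class name `a_`
# corresponds to `TransfoXLConfig` upstream):
_demo_cfg = a_(cutoffs=[100, 1000], proj_share_all_but_first=True)
assert _demo_cfg.tie_projs == [False, True, True]  # the first projection is never tied
assert _demo_cfg.max_position_embeddings == -1     # no sequence length limit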
| 334 | 0 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_lowercase = '''\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'''
_lowercase = '''\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'''
_lowercase = '''\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'''
def intersect_and_union( pred_label , label , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union( results , gt_seg_maps , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['mean_iou'] = np.nanmean(iou )
    metrics['mean_accuracy'] = np.nanmean(acc )
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
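# Tiny smoke test of the repaired metric functions above (a sketch): two 2x2 maps,
# two labels, and an ignore_index that never occurs in the toy data.
_pred_maps = [np.array([[0, 1], [1, 1]] )]
_gt_maps = [np.array([[0, 1], [0, 1]] )]
_toy_metrics = mean_iou(_pred_maps ,_gt_maps ,num_labels=2 ,ignore_index=255 )
# label 0: intersection 1 / union 2 = 0.5; label 1: intersection 2 / union 3 = 2/3
assert abs(_toy_metrics['mean_iou'] - (0.5 + 2 / 3) / 2 ) < 1e-6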
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) ,reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] ,)
    def _compute( self ,predictions ,references ,num_labels: int ,ignore_index: bool ,nan_to_num: Optional[int] = None ,label_map: Optional[Dict[int, int]] = None ,reduce_labels: bool = False ,) -> List[Any]:
        iou_result = mean_iou(
            results=predictions ,gt_seg_maps=references ,num_labels=num_labels ,ignore_index=ignore_index ,nan_to_num=nan_to_num ,label_map=label_map ,reduce_labels=reduce_labels ,)
        return iou_result
| 74 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
"""simple docstring"""
    def __init__( self ,parent ,vocab_size=100 ,batch_size=13 ,image_size=30 ,patch_size=2 ,num_channels=3 ,is_training=True ,use_labels=True ,hidden_size=32 ,num_hidden_layers=4 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,type_sequence_label_size=10 ,initializer_range=0.02 ,num_labels=3 ,scope=None ,out_indices=[0, 1, 2, 3] ,):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config( self ):
        return BeitConfig(
            vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=False ,initializer_range=self.initializer_range ,out_indices=self.out_indices ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ,pixel_labels ):
        model = BeitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self ,config ,pixel_values ,labels ,pixel_labels ):
        model = BeitForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self ,config ,pixel_values ,labels ,pixel_labels ):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def create_and_check_for_semantic_segmentation( self ,config ,pixel_values ,labels ,pixel_labels ):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        result = model(pixel_values ,labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class a_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = BeitModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=BeitConfig ,has_text_modality=False ,hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _lowerCAmelCase ( self : Union[str, Any] ):
pass
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
SCREAMING_SNAKE_CASE =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case ,nn.Linear ) )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE =['pixel_values']
self.assertListEqual(arg_names[:1] ,snake_case )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
def _lowerCAmelCase ( self : Any ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE =False
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.gradient_checkpointing_enable()
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =_config_zero_init(snake_case )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(config=snake_case )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@slow
def _lowerCAmelCase ( self : List[str] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =BeitModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
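# Shape sanity sketch for the integration tests below (hedged: it needs the hub
# weights and the vision extras, so it stays commented):
# processor = BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' )
# pixel_values = processor(images=prepare_img() ,return_tensors='pt' ).pixel_values
# assert pixel_values.shape == torch.Size([1, 3, 224, 224] )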
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : Tuple ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).pixel_values.to(snake_case )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE =torch.ones((1, 196) ,dtype=torch.bool ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(pixel_values=snake_case ,bool_masked_pos=snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(snake_case )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,snake_case ,atol=1e-2 ) )
@slow
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =281
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 21841) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =2396
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape ,snake_case )
        is_pillow_less_than_a = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] ,device=snake_case ,)
else:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] ,device=snake_case ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,snake_case ,atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case ,target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,snake_case )
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case )
SCREAMING_SNAKE_CASE =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape ,snake_case )
| 334 | 0 |
'''simple docstring'''
class FlowNetwork:
    def __init__( self , graph , sources , sinks ) -> Tuple:
        '''simple docstring'''
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources , sinks )
        self.verticies_count = len(graph )
        self.maximum_flow_algorithm = None
    def _normalize_graph( self , sources , sinks ) -> List[Any]:
        '''simple docstring'''
        if isinstance(sources , int ):
            sources = [sources]
        if isinstance(sinks , int ):
            sinks = [sinks]
        if len(sources ) == 0 or len(sinks ) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources ) > 1 or len(sinks ) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            size = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow( self ) -> List[Any]:
        '''simple docstring'''
        if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set maximum flow algorithm before.""" )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm( self , algorithm ) -> Dict:
        '''simple docstring'''
        self.maximum_flow_algorithm = algorithm(self )
class FlowNetworkAlgorithmExecutor:
    def __init__( self , flow_network ) -> List[Any]:
        '''simple docstring'''
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute( self ) -> Optional[Any]:
        '''simple docstring'''
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm( self ) -> List[Any]:
        '''simple docstring'''
        pass
class MaximumFlowAlgorithmExecutor( FlowNetworkAlgorithmExecutor ):
    def __init__( self , flow_network ) -> int:
        '''simple docstring'''
        super().__init__(flow_network )
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow( self ) -> Optional[Any]:
        '''simple docstring'''
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""" )
        return self.maximum_flow
class PushRelabelExecutor( MaximumFlowAlgorithmExecutor ):
    def __init__( self , flow_network ) -> Optional[int]:
        '''simple docstring'''
        super().__init__(flow_network )
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm( self ) -> str:
        '''simple docstring'''
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(i ) )
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index] )
    def process_vertex( self , vertex_index ) -> Dict:
        '''simple docstring'''
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index , neighbour_index )
            self.relabel(vertex_index )
    def push( self , from_index , to_index ) -> Optional[int]:
        '''simple docstring'''
        preflow_delta = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel( self , vertex_index ) -> int:
        '''simple docstring'''
        min_height = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f'maximum flow is {maximum_flow}')
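    # Sanity sketch (hedged): the only source-to-sink chain is 0 -> 1 -> 2 -> 3 with
    # capacities 7, 6 and 8, so the bottleneck fixes the maximum flow at 6.
    assert maximum_flow == 6, maximum_flow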
| 254 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class Tracker:
    """simple docstring"""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )

    def _forward_hook( self ,m: nn.Module ,inputs: Tensor ,outputs: Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m ,nn.Conv2d ) or isinstance(m ,nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : List[str] ,snake_case : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
    @property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
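# Leaf-module tracing sketch for the Tracker above (a hedged toy example, not part of
# the original conversion flow):
_probe = Tracker(nn.Sequential(nn.Conv2d(3 ,4 ,3 ) ,nn.ReLU() ) )(torch.randn(1 ,3 ,8 ,8 ) )
assert [type(m ) for m in _probe.parametrized] == [nn.Conv2d]  # ReLU holds no parameters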
@dataclass
class ModuleTransfer:
    """simple docstring"""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: list = field(default_factory=list )
    dest_skip: list = field(default_factory=list )
    raise_if_mismatch: bool = True
    def __call__( self ,x: Tensor ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip ,src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip ,dest_traced ) )
        if len(src_traced ) != len(dest_traced ) and self.raise_if_mismatch:
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                f' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced ,src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}' )
class FakeRegNetVisslWrapper( nn.Module ):
    """simple docstring"""

    def __init__( self ,model: nn.Module ):
        super().__init__()
        feature_blocks = []
        # - get the stem
        feature_blocks.append(('conv1', model.stem) )
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('block' ), f'Unexpected layer name {k}'
            block_index = len(feature_blocks ) + 1
            feature_blocks.append((f'res{block_index}', v) )
        self._feature_blocks = nn.ModuleDict(feature_blocks )

    def forward( self ,x: Tensor ):
        return get_trunk_forward_outputs(
            x ,out_feat_keys=None ,feature_blocks=self._feature_blocks ,)
class NameToFromModelFuncMap( dict ):
    """simple docstring"""

    def convert_name_to_timm( self ,x: str ) -> str:
        x_split = x.split('-' )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )

    def __getitem__( self ,x: str ):
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x )
            val = partial(lambda: (timm.create_model(x ,pretrained=True ).eval(), None) )
        else:
            val = super().__getitem__(x )
        return val
class NameToOurModelFuncMap( dict ):
    """simple docstring"""

    def __getitem__( self ,x: str ):
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
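# Dispatch check for the lookup map above (a sketch): seer checkpoints without an
# in1k-finetuned head resolve to the bare backbone, finetuned ones to the classifier.
assert NameToOurModelFuncMap()['regnet-y-320-seer'] is RegNetModel
assert NameToOurModelFuncMap()['regnet-y-320-seer-in1k'] is RegNetForImageClassification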
def manually_copy_vissl_head( from_state_dict, to_state_dict, keys ):
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(F'Copied key={from_key} to={to_key}' )
    return to_state_dict
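# Toy check of the key-copy helper above (a sketch with a made-up head tensor):
_src_sd = {'0.clf.0.weight': torch.ones(2 ,2 )}
_dst_sd = manually_copy_vissl_head(_src_sd ,{} ,[('0.clf.0.weight', 'classifier.1.weight')] )
assert torch.equal(_dst_sd['classifier.1.weight'] ,_src_sd['0.clf.0.weight'] )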
def convert_weight_and_push( name, from_model_func, our_model_func, config, save_directory, push_to_hub = True, ):
    """simple docstring"""
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model , from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys )
            our_model.load_state_dict(to_state_dict )
        our_outputs = our_model(x, output_hidden_states=True )
        our_output = (
            our_outputs.logits if isinstance(our_model, RegNetForImageClassification ) else our_outputs.last_hidden_state
        )
        from_output = from_model(x )
        from_output = from_output[-1] if type(from_output ) is list else from_output
        # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
        if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]
        assert torch.allclose(from_output, our_output ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message='Add model', use_temp_dir=True, )
        size = 224 if 'seer' not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k', size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message='Add image processor', use_temp_dir=True, )
        print(F'Pushed {name}' )
def convert_weights_and_push( save_directory, model_name = None, push_to_hub = True ):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset' ) ), 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, idalabel=idalabel, labelaid=labelaid )
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url, model_func) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(lowerCAmelCase_), map_location='cpu')
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['classy_state_dict']['base_model']['model']
        state_dict = model_state_dict['trunk']
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map['regnet-y-320-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf()), )
    names_to_from_model_map['regnet-y-640-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf()), )
    names_to_from_model_map['regnet-y-1280-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf()), )
    names_to_from_model_map['regnet-y-10b-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52)) ), )

    # IN1K finetuned
    names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf()), )
    names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf()), )
    names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf()), )
    names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52)) ), )
if model_name:
convert_weight_and_push(
lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_, )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
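# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of the original script) of the
# deferred-loading pattern used above: `partial` binds a checkpoint URL and a
# zero-argument model factory, so no download or model construction happens
# until the converter actually calls the map entry. All names here are
# illustrative only.
from functools import partial


def _demo_loader(checkpoint_url, model_func):
    # The real script downloads `checkpoint_url` and loads its trunk weights
    # into `model_func()`; this toy version just returns the factory output.
    return model_func(), {"url": checkpoint_url}


_demo_map = {
    "regnet-y-320-seer": partial(_demo_loader, "https://example.com/ckpt.torch", lambda: "dummy-model"),
}
# Work happens only at call time:
_demo_model, _demo_heads = _demo_map["regnet-y-320-seer"]()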
def jaro_winkler(str_1: str, str_2: str) -> float:
    """
    Compute the Jaro-Winkler similarity of two strings.

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_str_1: str, _str_2: str) -> str:
        matched = []
        limit = min(len(_str_1), len(_str_2)) // 2
        for i, l in enumerate(_str_1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str_2)))
            if l in _str_2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str_2 = f"{_str_2[0:_str_2.index(l)]} {_str_2[_str_2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str_1, str_2)
    matching_2 = get_matched_characters(str_2, str_1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c_1, c_2) for c_1, c_2 in zip(matching_1, matching_2) if c_1 != c_2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_1)
                + match_count / len(str_2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c_1, c_2 in zip(str_1[:4], str_2[:4]):
        if c_1 == c_2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
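# A quick hand-check of the metric above (illustrative, mirrors the doctests):
# "martha" vs "marhta": all 6 characters match with one transposition (t<->h),
# so jaro = (6/6 + 6/6 + 5/6) / 3 ~= 0.9444; with a common prefix of 3 ("mar"),
# jaro_winkler = 0.9444 + 0.1 * 3 * (1 - 0.9444) ~= 0.9611.
assert abs(jaro_winkler("martha", "marhta") - 0.9611111111111111) < 1e-9
assert abs(jaro_winkler("hello", "world") - 0.4666666666666666) < 1e-9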
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue MRPC dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references, )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
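# ---------------------------------------------------------------------------
# For reference, a minimal sketch (assumed behavior, not accelerate's actual
# implementation) of what `find_executable_batch_size` does: call the decorated
# function, and when a CUDA out-of-memory error surfaces, halve the batch size
# and retry until training fits.
def _toy_find_executable_batch_size(function, starting_batch_size=128):
    def wrapper():
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size)
            except RuntimeError as e:
                if "out of memory" in str(e).lower():
                    batch_size //= 2  # retry with a smaller batch
                else:
                    raise
        raise RuntimeError("No executable batch size found, reached zero.")

    return wrapper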
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class TableTransformerConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a `TableTransformerModel`. It is used to
    instantiate a Table Transformer model according to the specified arguments, defining the model architecture.
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
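# A minimal usage sketch (not part of the original module): because of the
# `attribute_map` above, `hidden_size` and `num_attention_heads` read through
# to `d_model` and `encoder_attention_heads`.
_example_config = TableTransformerConfig(d_model=256, encoder_attention_heads=8)
assert _example_config.hidden_size == 256
assert _example_config.num_attention_heads == 8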
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we tracked all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}.""" )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""")
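# A small self-contained sketch (not from the original script) of how
# `ModuleTransfer` is meant to be used: trace two architecturally identical
# modules with the same input and copy weights operation-by-operation.
_src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
_dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
ModuleTransfer(src=_src, dest=_dest)(torch.randn(1, 3, 16, 16))
assert torch.allclose(_src[0].weight, _dest[0].weight)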
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"""Converting {name}...""")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"""resnet{"-".join(name.split("resnet"))}"""
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )

        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz',
            extract_compressed_file=True,
        )
        self.data_dir = f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """This warms up the cache so that we can time the next test without including download time, which varies"""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            '$MAX_LEN': 64,
            '$BS': 64,
            '$GAS': 1,
            '$ENRO_DIR': self.data_dir,
            'facebook/mbart-large-cc25': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '--learning_rate=3e-5': '--learning_rate 3e-4',
            '--num_train_epochs 6': '--num_train_epochs 1',
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py')[1].strip()
        bash_script = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f'\n            --output_dir {output_dir}\n            --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n            --sortish_sampler\n            --do_predict\n            --gpus 1\n            --freeze_encoder\n            --n_train 40000\n            --n_val 500\n            --n_test 500\n            --fp16_opt_level O1\n            --num_sanity_val_steps 0\n            --eval_beams 2\n        '.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ['finetune.py'] + bash_script.split() + args
        with patch.object(sys, 'argv', testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
        self.assertEqual(len(metrics['val']), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'], float)
        self.assertGreater(last_step_stats['val_avg_gen_time'], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats['val_avg_gen_time'], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats['val_avg_bleu'], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu']), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith('.ckpt')][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location='cpu')
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['test']) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f'{self.test_file_dir_str}/test_data/wmt_en_ro'
        env_vars_to_replace = {
            '--fp16_opt_level=O1': '',
            '$MAX_LEN': 128,
            '$BS': 16,
            '$GAS': 1,
            '$ENRO_DIR': data_dir,
            '$m': 'sshleifer/student_marian_en_ro_6_1',
            'val_check_interval=0.25': 'val_check_interval=1.0',
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py')[1].strip()
        )
        bash_script = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
        bash_script = bash_script.replace('--fp16 ', ' ')

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace('--fp16', '')
        epochs = 6
        testargs = (
            ['distillation.py']
            + bash_script.split()
            + [
                f'--output_dir={output_dir}',
                '--gpus=1',
                '--learning_rate=1e-3',
                f'--num_train_epochs={epochs}',
                '--warmup_steps=10',
                '--val_check_interval=1.0',
                '--do_predict',
            ]
        )
        with patch.object(sys, 'argv', testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
        assert len(metrics['val']) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith('.ckpt')][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location='cpu')
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['test']) == 1
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of an NLLB-MoE model, defining the dense
    encoder-decoder architecture and its sparse mixture-of-experts layers.
    """

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
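# A minimal usage sketch (not part of the original module): with
# `encoder_sparse_step=4`, roughly every fourth encoder layer is a sparse
# mixture-of-experts layer; the layers in between use ordinary dense FFNs.
_moe_cfg = NllbMoeConfig(encoder_layers=12, encoder_sparse_step=4, num_experts=8)
assert _moe_cfg.num_experts == 8
assert _moe_cfg.encoder_layers // _moe_cfg.encoder_sparse_step == 3  # sparse layers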
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = 'blip_2_vision_model'

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act='gelu',
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> 'PretrainedConfig':
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == 'blip-2':
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = 'blip_2_qformer'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> 'PretrainedConfig':
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == 'blip-2':
            config_dict = config_dict['qformer_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = 'blip-2'
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')

        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
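# A minimal usage sketch (not part of the original module): a composite BLIP-2
# config can be built from its three sub-configs; note how the Q-Former's
# cross-attention width is tied to the vision tower's hidden size.
_blip2_cfg = Blip2Config()
assert _blip2_cfg.qformer_config.encoder_hidden_size == _blip2_cfg.vision_config.hidden_size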
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ernie'] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
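# A minimal sketch (illustrative, not transformers' actual implementation) of
# the lazy-module idea used above: resolve an attribute to a submodule import
# only on first access, so importing the package itself stays cheap.
import importlib


class _ToyLazyModule:
    def __init__(self, name, attr_to_module):
        self._name = name
        self._attr_to_module = attr_to_module  # e.g. {"ErnieModel": "modeling_ernie"}

    def __getattr__(self, attr):
        module_name = self._attr_to_module[attr]
        module = importlib.import_module(f"{self._name}.{module_name}")
        return getattr(module, attr)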
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_DESCRIPTION = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_KWARGS_DESCRIPTION = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n    predictions: list of generated text to score. Each predictions\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\nOptional Args:\n    num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n    kmeans_max_iter: maximum number of k-means iterations. Default 500\n    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n    max_text_length: maximum number of tokens to consider. Default 1024\n    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n    mauve_scaling_factor: \"c\" from the paper. Default 5.\n    verbose: If True (default), print running time updates\n    seed: random seed to initialize k-means cluster assignments.\nReturns:\n    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n    q_hist: same as above, but with q_text.\nExamples:\n\n    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n    >>> import datasets\n    >>> mauve = datasets.load_metric('mauve')\n    >>> predictions = [\"hello there\", \"general kenobi\"]\n    >>> references = [\"hello there\", \"general kenobi\"]\n    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n    >>> print(out.mauve) # doctest: +SKIP\n    1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    """MAUVE metric: measures the gap between neural text and human text."""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
return out
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class WavaVecaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class WavaVecaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WavaVecaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1_600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1_000])
            self.assertTrue(input_values[0][1_000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1_200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1_400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1_600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1_000])
            self._check_zero_mean_unit_variance(input_values[2][:1_200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1_000, padding="max_length", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1_000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1_000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2_000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1_000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = WavaVecaConfig.from_pretrained(model_id)
            feat_extract = WavaVecaFeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 111 |
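# --- illustrative example (not from the original dataset row) ---
# A minimal numpy sketch of the zero-mean / unit-variance normalization that
# _check_zero_mean_unit_variance verifies above; `zero_mean_unit_var` is a
# hypothetical helper, not the library's API.
import numpy as np
def zero_mean_unit_var(values, eps=1e-7):
    # normalize one utterance: subtract the mean, divide by the (epsilon-padded) std
    return (values - values.mean()) / np.sqrt(values.var() + eps)
_speech = np.random.rand(800).astype(np.float32)
_normed = zero_mean_unit_var(_speech)
assert abs(_normed.mean()) < 1e-3 and abs(_normed.var() - 1.0) < 1e-3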
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'vit_mae'
def __init__( self : Union[str, Any] ,snake_case : Any=768 ,snake_case : List[str]=12 ,snake_case : Optional[int]=12 ,snake_case : int=3072 ,snake_case : List[Any]="gelu" ,snake_case : str=0.0 ,snake_case : str=0.0 ,snake_case : Optional[Any]=0.02 ,snake_case : Dict=1e-12 ,snake_case : List[str]=224 ,snake_case : Any=16 ,snake_case : Any=3 ,snake_case : Tuple=True ,snake_case : List[Any]=16 ,snake_case : List[str]=512 ,snake_case : List[Any]=8 ,snake_case : Dict=2048 ,snake_case : Union[str, Any]=0.75 ,snake_case : Union[str, Any]=False ,**snake_case : Optional[int] ,):
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =layer_norm_eps
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =num_channels
SCREAMING_SNAKE_CASE =qkv_bias
SCREAMING_SNAKE_CASE =decoder_num_attention_heads
SCREAMING_SNAKE_CASE =decoder_hidden_size
SCREAMING_SNAKE_CASE =decoder_num_hidden_layers
SCREAMING_SNAKE_CASE =decoder_intermediate_size
SCREAMING_SNAKE_CASE =mask_ratio
SCREAMING_SNAKE_CASE =norm_pix_loss
| 334 | 0 |
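# --- illustrative example (not from the original dataset row) ---
# What the ViT-MAE defaults above imply for masking: 224x224 images cut into
# 16x16 patches give 196 tokens, and mask_ratio=0.75 hides 147 of them.
image_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196
num_masked = int(mask_ratio * num_patches)     # 147 patches removed before encoding
num_visible = num_patches - num_masked         # 49 patches seen by the encoder
print(num_patches, num_masked, num_visible)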
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """simple docstring"""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """simple docstring"""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    """simple docstring"""
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"  # no J: I and J share a cell
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    """simple docstring"""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    """simple docstring"""
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
| 37 |
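# --- illustrative example (not from the original dataset row) ---
# Round trip of the repaired Playfair helpers above; decoding recovers the
# *prepared* plaintext (upper-cased, with X padding between doubled letters).
_key = "PLAYFAIR EXAMPLE"
_message = "hide the gold in the tree stump"
_cipher = encode(_message, _key)
assert decode(_cipher, _key) == prepare_input(_message)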
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase ={
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 | 0 |
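# --- illustrative example (not from the original dataset row) ---
# A minimal, self-contained sketch of the lazy-import idea behind _LazyModule:
# resolve a symbol's submodule only on first attribute access. `LazySketch`
# is hypothetical, not the transformers implementation.
import importlib
class LazySketch:
    def __init__(self, package, import_structure):
        self._package = package
        # map each exported symbol to the submodule that defines it
        self._origin = {sym: mod for mod, syms in import_structure.items() for sym in syms}
    def __getattr__(self, symbol):
        module = importlib.import_module(f"{self._package}.{self._origin[symbol]}")
        return getattr(module, symbol)
# e.g. LazySketch("transformers.models.ernie", {"configuration_ernie": ["ErnieConfig"]})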
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Dict = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCamelCase_ ):
"""simple docstring"""
a_ = ["""pixel_values"""]
def __init__( self : Dict , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCAmelCase_ : List[str] , ) -> List[Any]:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = size if size is not None else {'shortest_edge': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase ( self : List[str] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple , ) -> Optional[Any]:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCAmelCase = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
__lowerCAmelCase = get_resize_output_image_size(lowerCAmelCase_ , size=lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}""" )
return resize(
lowerCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ) -> List[str]:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys \'height\' and \'width\'. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple , ) -> Dict:
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple , ) -> Union[str, Any]:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : Optional[int] , ) -> str:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 284 |
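# --- illustrative example (not from the original dataset row) ---
# The arithmetic behind do_rescale / do_normalize above, applied to one
# (H, W, C) numpy image; the mean/std are the usual ImageNet statistics.
import numpy as np
_image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
_mean = np.array([0.485, 0.456, 0.406])
_std = np.array([0.229, 0.224, 0.225])
_pixels = _image * (1 / 255)        # rescale_factor = 1/255
_pixels = (_pixels - _mean) / _std  # per-channel normalization
print(_pixels.shape, _pixels.min(), _pixels.max())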
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : Any ):
with self.assertRaises(snake_case ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() )
def _lowerCAmelCase ( self : Union[str, Any] ):
with self.assertRaises(snake_case ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : int ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def _lowerCAmelCase ( self : Dict ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
@require_pil
def _lowerCAmelCase ( self : int ):
import PIL.Image
SCREAMING_SNAKE_CASE =PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' ,side_effect=snake_case ) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE =pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] ,type=Image() ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting' ,snake_case )
self.assertFalse(kwargs['optimize_list_casting'] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferReader(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_, pa.Buffer ) else pa.memory_map(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=lowerCAmelCase_, features=lowerCAmelCase_ ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =f.read_all()
SCREAMING_SNAKE_CASE =pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCAmelCase_ )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=[1, 2] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=10 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=10 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1}, key=1 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase_, 'test.arrow' )
with ArrowWriter(path=lowerCAmelCase_, schema=pa.schema(lowerCAmelCase_ ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(lowerCAmelCase_, 1 )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
if pa.types.is_list(lowerCAmelCase_ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
if isinstance(lst[0], lowerCAmelCase_ ):
change_first_primitive_element_in_list(lst[0], lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE =value
@pytest.mark.parametrize('optimized_int_type, expected_dtype', [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(lowerCAmelCase_, optimized_int_type=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype', [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
], )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE =copy.deepcopy(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCAmelCase_, lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=lowerCAmelCase_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ='mock://dataset-train.arrow'
with ArrowWriter(path=lowerCAmelCase_, storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs, type(lowerCAmelCase_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCAmelCase_ )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(stream=lowerCAmelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
import PIL.Image
SCREAMING_SNAKE_CASE =str(tmp_path / 'test_image_rgb.jpg' )
PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(lowerCAmelCase_, format='png' )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCAmelCase_, features=Features({'image': Image()} ), embed_local_files=lowerCAmelCase_ ) as writer:
writer.write({'image': image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'], lowerCAmelCase_ )
with open(lowerCAmelCase_, 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.schema([pa.field('col_1', pa.string(), nullable=lowerCAmelCase_ )] )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(stream=lowerCAmelCase_ ) as writer:
writer._build_writer(inferred_schema=lowerCAmelCase_ )
assert writer._schema == pa.schema([pa.field('col_1', pa.string() )] )
| 334 | 0 |
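# --- illustrative example (not from the original dataset row) ---
# The pyarrow stream round trip that _check_output relies on, without the
# datasets wrappers: write a table to a buffer, then read it back over ipc.
import pyarrow as pa
_schema = pa.schema({"col_1": pa.string(), "col_2": pa.int64()})
_sink = pa.BufferOutputStream()
with pa.ipc.new_stream(_sink, _schema) as _writer:
    _writer.write_table(pa.table({"col_1": ["foo", "bar"], "col_2": [1, 2]}, schema=_schema))
_table = pa.ipc.open_stream(pa.BufferReader(_sink.getvalue())).read_all()
assert _table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}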
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ = logging.get_logger(__name__)
A__ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class a ( lowerCamelCase_ ):
__lowerCAmelCase : Tuple = """blip_2_vision_model"""
def __init__( self :List[Any] ,__lowercase :List[Any]=1_4_0_8 ,__lowercase :Optional[Any]=6_1_4_4 ,__lowercase :Optional[int]=3_9 ,__lowercase :Optional[int]=1_6 ,__lowercase :Optional[Any]=2_2_4 ,__lowercase :Tuple=1_4 ,__lowercase :Optional[Any]="gelu" ,__lowercase :Union[str, Any]=0.0_0001 ,__lowercase :Dict=0.0 ,__lowercase :Union[str, Any]=1e-1_0 ,__lowercase :int=True ,**__lowercase :str ,):
super().__init__(**__lowercase )
snake_case__ : str = hidden_size
snake_case__ : Tuple = intermediate_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : int = num_attention_heads
snake_case__ : Tuple = patch_size
snake_case__ : Optional[Any] = image_size
snake_case__ : Optional[int] = initializer_range
snake_case__ : Dict = attention_dropout
snake_case__ : int = layer_norm_eps
snake_case__ : Tuple = hidden_act
snake_case__ : Optional[int] = qkv_bias
@classmethod
def __lowerCamelCase ( cls :Dict ,__lowercase :Union[str, os.PathLike] ,**__lowercase :str ):
cls._set_token_in_kwargs(__lowercase )
snake_case__ , snake_case__ : int = cls.get_config_dict(__lowercase ,**__lowercase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
snake_case__ : Any = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowercase ,**__lowercase )
class a ( lowerCamelCase_ ):
__lowerCAmelCase : str = """blip_2_qformer"""
def __init__( self :Any ,__lowercase :Dict=3_0_5_2_2 ,__lowercase :int=7_6_8 ,__lowercase :List[Any]=1_2 ,__lowercase :List[str]=1_2 ,__lowercase :Optional[Any]=3_0_7_2 ,__lowercase :str="gelu" ,__lowercase :Optional[Any]=0.1 ,__lowercase :Union[str, Any]=0.1 ,__lowercase :Optional[Any]=5_1_2 ,__lowercase :List[Any]=0.02 ,__lowercase :List[str]=1e-1_2 ,__lowercase :Tuple=0 ,__lowercase :Union[str, Any]="absolute" ,__lowercase :List[Any]=2 ,__lowercase :List[str]=1_4_0_8 ,**__lowercase :Optional[Any] ,):
super().__init__(pad_token_id=__lowercase ,**__lowercase )
snake_case__ : Tuple = vocab_size
snake_case__ : int = hidden_size
snake_case__ : Dict = num_hidden_layers
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : int = hidden_act
snake_case__ : Optional[int] = intermediate_size
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : Dict = attention_probs_dropout_prob
snake_case__ : Tuple = max_position_embeddings
snake_case__ : Any = initializer_range
snake_case__ : Optional[int] = layer_norm_eps
snake_case__ : int = position_embedding_type
snake_case__ : Optional[int] = cross_attention_frequency
snake_case__ : Tuple = encoder_hidden_size
@classmethod
def __lowerCamelCase ( cls :List[Any] ,__lowercase :Union[str, os.PathLike] ,**__lowercase :Dict ):
cls._set_token_in_kwargs(__lowercase )
snake_case__ , snake_case__ : Any = cls.get_config_dict(__lowercase ,**__lowercase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
snake_case__ : int = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowercase ,**__lowercase )
class a ( lowerCamelCase_ ):
__lowerCAmelCase : Tuple = """blip-2"""
__lowerCAmelCase : Any = True
def __init__( self :int ,__lowercase :Dict=None ,__lowercase :Tuple=None ,__lowercase :str=None ,__lowercase :Union[str, Any]=3_2 ,**__lowercase :int ):
super().__init__(**__lowercase )
if vision_config is None:
snake_case__ : List[Any] = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
snake_case__ : Tuple = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
snake_case__ : str = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
snake_case__ : Any = BlipaVisionConfig(**__lowercase )
snake_case__ : Optional[int] = BlipaQFormerConfig(**__lowercase )
snake_case__ : Tuple = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
snake_case__ : Union[str, Any] = CONFIG_MAPPING[text_model_type](**__lowercase )
snake_case__ : Tuple = self.text_config.tie_word_embeddings
snake_case__ : List[str] = self.text_config.is_encoder_decoder
snake_case__ : Any = num_query_tokens
snake_case__ : Dict = self.vision_config.hidden_size
snake_case__ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case__ : Union[str, Any] = 1.0
snake_case__ : int = 0.02
@classmethod
def __lowerCamelCase ( cls :Union[str, Any] ,__lowercase :BlipaVisionConfig ,__lowercase :BlipaQFormerConfig ,__lowercase :PretrainedConfig ,**__lowercase :Any ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**__lowercase ,)
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : str = copy.deepcopy(self.__dict__ )
snake_case__ : Optional[Any] = self.vision_config.to_dict()
snake_case__ : Union[str, Any] = self.qformer_config.to_dict()
snake_case__ : Any = self.text_config.to_dict()
snake_case__ : Any = self.__class__.model_type
return output
| 230 |
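# --- illustrative example (not from the original dataset row) ---
# A self-contained sketch of the composite-config pattern above: a parent
# config owns sub-configs and nests them in to_dict. Class names are
# hypothetical stand-ins for the BLIP-2 classes.
import copy
class SubConfigSketch:
    def __init__(self, hidden_size=1408):
        self.hidden_size = hidden_size
    def to_dict(self):
        return copy.deepcopy(self.__dict__)
class ParentConfigSketch:
    def __init__(self, vision_config=None, num_query_tokens=32):
        self.vision_config = SubConfigSketch(**(vision_config or {}))
        self.num_query_tokens = num_query_tokens
    def to_dict(self):
        output = dict(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        return output
_cfg = ParentConfigSketch(vision_config={"hidden_size": 2048})
assert _cfg.to_dict()["vision_config"]["hidden_size"] == 2048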
def or_gate(input_a, input_b):
    """simple docstring"""
    return int((input_a, input_b).count(1) != 0)
def test_or_gate():
    """simple docstring"""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 334 | 0 |
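# --- illustrative example (not from the original dataset row) ---
# Other gates compose from or_gate; nor_gate here is an illustrative
# addition, not part of the original file.
def nor_gate(input_a, input_b):
    return int(not or_gate(input_a, input_b))
assert nor_gate(0, 0) == 1
assert nor_gate(1, 0) == 0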
"""simple docstring"""
import baseaa
def _snake_case ( snake_case__ : List[str] ):
return baseaa.aaaencode(string.encode('utf-8' ) )
def _snake_case ( snake_case__ : int ):
return baseaa.aaadecode(lowerCAmelCase_ ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 74 |
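# --- illustrative example (not from the original dataset row) ---
# Round trip of the Ascii85 helpers above: encode produces bytes, decode
# restores the original string.
assert base85_decode(base85_encode("Hello, world!")) == "Hello, world!"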
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"vocab_file": "vocab.txt"}
_lowerCamelCase ={
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_lowerCamelCase ={
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
with open(lowerCAmelCase_, 'r' ) as f:
SCREAMING_SNAKE_CASE =f.read().splitlines()
return [l.strip() for l in lines]
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ['input_ids', 'attention_mask']
def __init__( self : int ,snake_case : Dict ,snake_case : Dict="<unk>" ,snake_case : Optional[int]="<cls>" ,snake_case : Optional[int]="<pad>" ,snake_case : int="<mask>" ,snake_case : Optional[int]="<eos>" ,**snake_case : List[str] ,):
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE =load_vocab_file(snake_case )
SCREAMING_SNAKE_CASE =dict(enumerate(self.all_tokens ) )
SCREAMING_SNAKE_CASE ={tok: ind for ind, tok in enumerate(self.all_tokens )}
SCREAMING_SNAKE_CASE =unk_token
SCREAMING_SNAKE_CASE =cls_token
SCREAMING_SNAKE_CASE =pad_token
SCREAMING_SNAKE_CASE =mask_token
SCREAMING_SNAKE_CASE =eos_token
SCREAMING_SNAKE_CASE =self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : int ):
return self._id_to_token.get(snake_case ,self.unk_token )
def _lowerCAmelCase ( self : Dict ,snake_case : str ):
return self._token_to_id.get(snake_case ,self._token_to_id.get(self.unk_token ) )
def _lowerCAmelCase ( self : Tuple ,snake_case : List[str] ,**snake_case : Any ):
return text.split()
def _lowerCAmelCase ( self : Optional[int] ,snake_case : str=False ):
return len(self._id_to_token )
def _lowerCAmelCase ( self : List[str] ):
return {token: i for i, token in enumerate(self.all_tokens )}
def _lowerCAmelCase ( self : List[Any] ,snake_case : str ):
return self._token_to_id.get(snake_case ,self._token_to_id.get(self.unk_token ) )
def _lowerCAmelCase ( self : Any ,snake_case : int ):
return self._id_to_token.get(snake_case ,self.unk_token )
def _lowerCAmelCase ( self : List[str] ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE =[self.cls_token_id]
SCREAMING_SNAKE_CASE =[self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def _lowerCAmelCase ( self : Optional[int] ,snake_case : List ,snake_case : Optional[List] = None ,snake_case : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
SCREAMING_SNAKE_CASE =[1] + ([0] * len(snake_case )) + [1]
if token_ids_a is not None:
mask += [0] * len(snake_case ) + [1]
return mask
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Dict ,snake_case : Any ):
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,(filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
with open(snake_case ,'w' ) as f:
f.write('\n'.join(self.all_tokens ) )
return (vocab_file,)
@property
def _lowerCAmelCase ( self : int ):
return self.get_vocab_size(with_added_tokens=snake_case )
def _lowerCAmelCase ( self : str ,snake_case : Union[List[str], List[AddedToken]] ,snake_case : bool = False ):
return super()._add_tokens(snake_case ,special_tokens=snake_case )
| 334 | 0 |
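# --- illustrative example (not from the original dataset row) ---
# Layout of the special tokens the ESM tokenizer builds above; the ids are
# illustrative placeholders, not real vocabulary indices.
_cls_id, _eos_id = 0, 2
_ids_a, _ids_b = [5, 6, 7], [8, 9]
_single = [_cls_id] + _ids_a + [_eos_id]                     # <cls> seq <eos>
_pair = [_cls_id] + _ids_a + [_eos_id] + _ids_b + [_eos_id]  # pairs always end in <eos>
print(_single, _pair)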
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class _A ( lowerCamelCase_ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 254 |
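# --- illustrative example (not from the original dataset row) ---
# The generic shape of the deprecation shim above: subclass the replacement
# and warn on construction. Names here are hypothetical.
import warnings
class NewProcessorSketch:
    def __init__(self, size=224):
        self.size = size
class OldProcessorSketch(NewProcessorSketch):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessorSketch is deprecated; use NewProcessorSketch instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
OldProcessorSketch()  # emits the FutureWarning, otherwise behaves like the new class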
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
def _lowerCAmelCase ( self : Any ,snake_case : Any ,snake_case : Tensor ,snake_case : Tensor ):
SCREAMING_SNAKE_CASE =len(list(m.modules() ) ) == 1 or isinstance(snake_case ,nn.Convad ) or isinstance(snake_case ,nn.BatchNormad )
if has_not_submodules:
self.traced.append(snake_case )
def __call__( self : int ,snake_case : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
@property
def _lowerCAmelCase ( self : Tuple ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 0
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
def __call__( self : int ,snake_case : Tensor ):
SCREAMING_SNAKE_CASE =Tracker(self.dest )(snake_case ).parametrized
SCREAMING_SNAKE_CASE =Tracker(self.src )(snake_case ).parametrized
SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.src_skip ,snake_case ) )
SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip ,snake_case ) )
if len(snake_case ) != len(snake_case ):
raise Exception(
f'Numbers of operations are different. Source module has {len(snake_case )} operations while'
f' destination module has {len(snake_case )}.' )
for dest_m, src_m in zip(snake_case ,snake_case ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'Transfered from={src_m} to={dest_m}' )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = True ):
"""simple docstring"""
print(F'Converting {name}...' )
with torch.no_grad():
SCREAMING_SNAKE_CASE =timm.create_model(lowerCAmelCase_, pretrained=lowerCAmelCase_ ).eval()
SCREAMING_SNAKE_CASE =ResNetForImageClassification(lowerCAmelCase_ ).eval()
SCREAMING_SNAKE_CASE =ModuleTransfer(src=lowerCAmelCase_, dest=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =torch.randn((1, 3, 224, 224) )
module_transfer(lowerCAmelCase_ )
assert torch.allclose(from_model(lowerCAmelCase_ ), our_model(lowerCAmelCase_ ).logits ), "The model logits don't match the original one."
SCREAMING_SNAKE_CASE =F'resnet{"-".join(name.split("resnet" ) )}'
print(lowerCAmelCase_ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=lowerCAmelCase_, )
# we can use the convnext one
SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=lowerCAmelCase_, )
print(F'Pushed {checkpoint_name}' )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ = None, lowerCAmelCase_ = True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ='imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE =1000
SCREAMING_SNAKE_CASE =(1, num_labels)
SCREAMING_SNAKE_CASE ='huggingface/label-files'
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) )
SCREAMING_SNAKE_CASE ={int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE =idalabel
SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE =partial(lowerCAmelCase_, num_labels=lowerCAmelCase_, idalabel=lowerCAmelCase_, labelaid=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE ={
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(lowerCAmelCase_, names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 334 | 0 |
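# --- illustrative example (not from the original dataset row) ---
# A minimal sketch of the Tracker idea above: forward hooks that record leaf
# modules in execution order, so two networks can be zipped and compared.
import torch
import torch.nn as nn
_traced = []
def _record_leaf(module, inputs, output):
    if len(list(module.children())) == 0:  # leaf modules only
        _traced.append(module)
_model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
_handles = [m.register_forward_hook(_record_leaf) for m in _model.modules()]
_model(torch.randn(1, 3, 32, 32))
for _h in _handles:
    _h.remove()
print([type(m).__name__ for m in _traced])  # ['Conv2d', 'BatchNorm2d', 'ReLU']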
def count_inversions_bf(arr):
    """simple docstring"""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """simple docstring"""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """simple docstring"""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    """simple docstring"""
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
    main()
| 184 |
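# --- illustrative example (not from the original dataset row) ---
# The two counters above agree; the brute force is O(n^2), the merge-sort
# variant O(n log n).
_sample = [3, 1, 2]
assert count_inversions_bf(_sample) == 2  # inversions: (3, 1) and (3, 2)
_, _fast = count_inversions_recursive(_sample)
assert _fast == 2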
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_=7 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =None
if token is not None:
SCREAMING_SNAKE_CASE ={'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
# The id of a workflow (not of a workflow run)
SCREAMING_SNAKE_CASE ='636036'
SCREAMING_SNAKE_CASE =F'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
SCREAMING_SNAKE_CASE =requests.get(lowerCAmelCase_, headers=lowerCAmelCase_ ).json()
return result["workflow_runs"]
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =get_daily_ci_runs(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
SCREAMING_SNAKE_CASE =workflow_run['id']
break
return workflow_run_id
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =get_last_daily_ci_runs(lowerCAmelCase_ )
if workflow_run_id is not None:
SCREAMING_SNAKE_CASE =get_artifacts_links(worflow_run_id=lowerCAmelCase_, token=lowerCAmelCase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
SCREAMING_SNAKE_CASE =artifacts_links[artifact_name]
download_artifact(
artifact_name=lowerCAmelCase_, artifact_url=lowerCAmelCase_, output_dir=lowerCAmelCase_, token=lowerCAmelCase_ )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
get_last_daily_ci_artifacts(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
SCREAMING_SNAKE_CASE ={}
for artifact_name in artifact_names:
SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase_, F'{artifact_name}.zip' )
if os.path.isfile(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE ={}
with zipfile.ZipFile(lowerCAmelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCAmelCase_ ):
# read the file
with z.open(lowerCAmelCase_ ) as f:
SCREAMING_SNAKE_CASE =f.read().decode('UTF-8' )
return results
| 334 | 0 |
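# --- illustrative example (not from the original dataset row) ---
# The zip-reading step above, self-contained: build an artifact in memory
# and read every entry back as UTF-8 text.
import io
import zipfile
_buf = io.BytesIO()
with zipfile.ZipFile(_buf, "w") as _z:
    _z.writestr("stats.txt", "all green")
with zipfile.ZipFile(_buf) as _z:
    _reports = {name: _z.open(name).read().decode("UTF-8") for name in _z.namelist()}
assert _reports["stats.txt"] == "all green"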
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
a__ : Any =logging.get_logger(__name__)
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] =42
SCREAMING_SNAKE_CASE_ : Optional[int] =None
@staticmethod
def _lowerCamelCase ( ):
raise NotImplementedError
def _lowerCamelCase ( self : Dict , __A : Any , __A : int , __A : str , **__A : Optional[Any] ):
raise NotImplementedError
def _lowerCamelCase ( self : str , __A : List[Any] ):
raise NotImplementedError
def _lowerCamelCase ( self : List[Any] ):
if not self.is_available():
raise RuntimeError(
f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
return f'''`pip install {cls.pip_package or cls.name}`'''
class snake_case ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int ="optuna"
@staticmethod
def _lowerCamelCase ( ):
return is_optuna_available()
def _lowerCamelCase ( self : int , __A : str , __A : int , __A : str , **__A : List[str] ):
return run_hp_search_optuna(__A , __A , __A , **__A )
def _lowerCamelCase ( self : str , __A : Optional[int] ):
return default_hp_space_optuna(__A )
class snake_case ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] ="ray"
SCREAMING_SNAKE_CASE_ : Optional[int] ="\'ray[tune]\'"
@staticmethod
def _lowerCamelCase ( ):
return is_ray_available()
def _lowerCamelCase ( self : int , __A : Union[str, Any] , __A : int , __A : str , **__A : List[str] ):
return run_hp_search_ray(__A , __A , __A , **__A )
def _lowerCamelCase ( self : str , __A : int ):
return default_hp_space_ray(__A )
class snake_case ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple ="sigopt"
@staticmethod
def _lowerCamelCase ( ):
return is_sigopt_available()
def _lowerCamelCase ( self : int , __A : Optional[Any] , __A : int , __A : str , **__A : Union[str, Any] ):
return run_hp_search_sigopt(__A , __A , __A , **__A )
def _lowerCamelCase ( self : Tuple , __A : List[Any] ):
return default_hp_space_sigopt(__A )
class snake_case ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int ="wandb"
@staticmethod
def _lowerCamelCase ( ):
return is_wandb_available()
def _lowerCamelCase ( self : List[Any] , __A : Tuple , __A : int , __A : str , **__A : List[str] ):
return run_hp_search_wandb(__A , __A , __A , **__A )
def _lowerCamelCase ( self : Tuple , __A : Any ):
return default_hp_space_wandb(__A )
a__ : Tuple ={
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowercase__ ( ) -> int:
"""simple docstring"""
__UpperCamelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(lowerCAmelCase_ ) > 0:
__UpperCamelCase = available_backends[0].name
if len(lowerCAmelCase_ ) > 1:
logger.info(
F'''{len(lowerCAmelCase_ )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 53 |
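# --- illustrative example (not from the original dataset row) ---
# The backend-selection rule in default_hp_search_backend, reduced to its
# core: the first available backend in registry order wins.
_availability = {"optuna": False, "ray": True, "sigopt": False, "wandb": True}
_available = [name for name, ok in _availability.items() if ok]
assert _available[0] == "ray"  # dicts preserve insertion order in Python 3.7+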
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Tuple ,snake_case : Optional[int] ,snake_case : Dict=13 ,snake_case : str=7 ,snake_case : Dict=True ,snake_case : List[Any]=True ,snake_case : Dict=False ,snake_case : int=True ,snake_case : Dict=99 ,snake_case : int=32 ,snake_case : List[str]=5 ,snake_case : Optional[Any]=4 ,snake_case : Tuple=64 ,snake_case : List[Any]="gelu" ,snake_case : str=0.1 ,snake_case : str=0.1 ,snake_case : List[str]=512 ,snake_case : List[str]=16 ,snake_case : str=2 ,snake_case : Dict=0.02 ,snake_case : Optional[int]=3 ,snake_case : int=4 ,snake_case : Any=None ,snake_case : Union[str, Any]=2 ,snake_case : List[Any]=2 ,snake_case : Optional[int]=2 ,snake_case : Dict=2 ,snake_case : List[str]=4 ,snake_case : int=1 ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =seq_length
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_input_mask
SCREAMING_SNAKE_CASE =use_token_type_ids
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =type_vocab_size
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =num_choices
SCREAMING_SNAKE_CASE =scope
SCREAMING_SNAKE_CASE =q_groups
SCREAMING_SNAKE_CASE =k_groups
SCREAMING_SNAKE_CASE =v_groups
SCREAMING_SNAKE_CASE =post_attention_groups
SCREAMING_SNAKE_CASE =intermediate_groups
SCREAMING_SNAKE_CASE =output_groups
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Optional[int] ):
return SqueezeBertConfig(
embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
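# Note: the *_groups values are SqueezeBERT-specific knobs; they set the number
# of groups in the grouped pointwise convolutions that replace standard BERT's
# dense attention-projection and feed-forward layers.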
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Optional[Any] ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : str ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : List[Any] ,snake_case : int ,snake_case : Any ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =SqueezeBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Tuple ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : Dict ,snake_case : Optional[Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,start_positions=snake_case ,end_positions=snake_case )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Tuple ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Any ,snake_case : Tuple ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Tuple ,snake_case : Dict ,snake_case : str ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : List[str] ,snake_case : Dict ,snake_case : str ,snake_case : Union[str, Any] ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.num_choices
SCREAMING_SNAKE_CASE =SqueezeBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,labels=snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) =config_and_inputs
SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCAmelCase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = False
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,dim=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*snake_case )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*snake_case )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*snake_case )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*snake_case )
@slow
def _lowerCAmelCase ( self : str ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =SqueezeBertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_sentencepiece
@require_tokenizers
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
SCREAMING_SNAKE_CASE =torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE =model(snake_case )[0]
SCREAMING_SNAKE_CASE =torch.Size((1, 3) )
self.assertEqual(output.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(snake_case ,snake_case ,atol=1e-4 ) )
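# The three logits are the MNLI class scores for this checkpoint; the expected
# values above are reference outputs (the exact class ordering depends on the
# checkpoint's label mapping).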
| 334 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__lowerCamelCase : Dict = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A_ ( _lowerCAmelCase ) -> str:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
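# A hypothetical invocation that passes these checks (script name and paths are
# placeholders only):
#
#   python train.py --student_type distilbert --teacher_type bert --mlm \
#       --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0 \
#       --dump_path ./dump --data_file ./data.pickle --token_counts ./counts.pickle \
#       --student_config ./student_config.json --teacher_name bert-base-uncased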
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
if args.student_type == "roberta":
UpperCamelCase : List[Any] = False
elif args.student_type == "gpt2":
UpperCamelCase : Union[str, Any] = False
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
if args.student_type == "roberta":
UpperCamelCase : Dict = False
def A_ ( ) -> Union[str, Any]:
UpperCamelCase : int = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=lowerCAmelCase_ , choices=["distilbert", "roberta", "gpt2"] , required=lowerCAmelCase_ , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=lowerCAmelCase_ , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=lowerCAmelCase_ , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=lowerCAmelCase_ , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=lowerCAmelCase_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=lowerCAmelCase_ , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=lowerCAmelCase_ , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=lowerCAmelCase_ , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=lowerCAmelCase_ , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=lowerCAmelCase_ , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=lowerCAmelCase_ , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=lowerCAmelCase_ , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=lowerCAmelCase_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=lowerCAmelCase_ , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only." , )
parser.add_argument("--n_epoch" , type=lowerCAmelCase_ , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=lowerCAmelCase_ , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=lowerCAmelCase_ , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=lowerCAmelCase_ , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=lowerCAmelCase_ , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5e-4 , type=lowerCAmelCase_ , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1e-6 , type=lowerCAmelCase_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=lowerCAmelCase_ , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=lowerCAmelCase_ , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=lowerCAmelCase_ , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=lowerCAmelCase_ , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=lowerCAmelCase_ , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=lowerCAmelCase_ , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=lowerCAmelCase_ , default=500 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=lowerCAmelCase_ , default=4000 , help="Checkpoint interval." )
UpperCamelCase : int = parser.parse_args()
sanity_checks(lowerCAmelCase_ )
# ARGS #
init_gpu_params(lowerCAmelCase_ )
set_seed(lowerCAmelCase_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
" itUse `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(lowerCAmelCase_ ) , lowerCAmelCase_ , indent=4 )
git_log(args.dump_path )
UpperCamelCase , UpperCamelCase , UpperCamelCase : int = MODEL_CLASSES[args.student_type]
UpperCamelCase , UpperCamelCase , UpperCamelCase : str = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCamelCase : Dict = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCamelCase : Union[str, Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCamelCase : Dict = tokenizer.all_special_tokens.index(lowerCAmelCase_ )
UpperCamelCase : Tuple = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
UpperCamelCase : Any = special_tok_ids
UpperCamelCase : str = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , "rb" ) as fp:
UpperCamelCase : List[str] = pickle.load(lowerCAmelCase_ )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , "rb" ) as fp:
UpperCamelCase : Optional[Any] = pickle.load(lowerCAmelCase_ )
UpperCamelCase : Union[str, Any] = np.maximum(lowerCAmelCase_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCamelCase : Any = 0.0 # do not predict special tokens
UpperCamelCase : str = torch.from_numpy(lowerCAmelCase_ )
else:
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : Optional[int] = LmSeqsDataset(params=lowerCAmelCase_ , data=lowerCAmelCase_ )
logger.info("Data loader created." )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
UpperCamelCase : List[str] = student_config_class.from_pretrained(args.student_config )
UpperCamelCase : int = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCamelCase : Optional[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCAmelCase_ )
else:
UpperCamelCase : Tuple = student_model_class(lowerCAmelCase_ )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info("Student loaded." )
# TEACHER #
UpperCamelCase : Tuple = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCAmelCase_ )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowerCAmelCase_ , lowerCAmelCase_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowerCAmelCase_ , lowerCAmelCase_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCamelCase : Union[str, Any] = Distiller(
params=lowerCAmelCase_ , dataset=lowerCAmelCase_ , token_probs=lowerCAmelCase_ , student=lowerCAmelCase_ , teacher=lowerCAmelCase_ )
distiller.train()
logger.info("Let\'s go get some drinks." )
if __name__ == "__main__":
main()
| 52 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
@property
def _lowerCAmelCase ( self : List[Any] ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case ,'feature_size' ) )
self.assertTrue(hasattr(snake_case ,'sampling_rate' ) )
self.assertTrue(hasattr(snake_case ,'padding_value' ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(snake_case ) == len(snake_case ) for x, y in zip(snake_case ,processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _lowerCAmelCase ( self : List[Any] ,snake_case : Optional[Any]=False ):
def _inputs_have_equal_length(snake_case : Dict ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : str ,snake_case : Dict ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE =self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE =self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(all(len(snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
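# round the maximum length up to the next multiple of 10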
SCREAMING_SNAKE_CASE =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int]=False ):
def _inputs_have_equal_length(snake_case : str ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : Tuple ,snake_case : Optional[Any] ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
# truncate to smallest
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to smallest with np
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to middle
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' ,truncation=snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =12
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
SCREAMING_SNAKE_CASE =len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
SCREAMING_SNAKE_CASE =((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
def _lowerCAmelCase ( self : Optional[int] ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : Tuple ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : List[str] ):
self._check_truncation(numpify=snake_case )
def _lowerCAmelCase ( self : int ):
self._check_truncation(numpify=snake_case )
@require_torch
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =min(snake_case )
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,truncation=snake_case ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
| 334 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__a: str = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
SCREAMING_SNAKE_CASE = field(default=lowerCamelCase_ , metadata={"help": "Whether to use SortishSampler or not."} )
SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
SCREAMING_SNAKE_CASE = field(default=lowerCamelCase_ , metadata={"help": "Whether to use Adafactor."} )
SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
SCREAMING_SNAKE_CASE = field(default=lowerCamelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} )
SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
SCREAMING_SNAKE_CASE = field(
default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 198 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowerCamelCase =2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase =50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase =0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =len([g for position, g in enumerate(lowerCAmelCase_ ) if g == main_target[position]] )
return (item, float(lowerCAmelCase_ ))
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =random.randint(0, len(lowerCAmelCase_ ) - 1 )
SCREAMING_SNAKE_CASE =parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE =parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =list(lowerCAmelCase_ )
if random.uniform(0, 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE =random.choice(lowerCAmelCase_ )
return "".join(lowerCAmelCase_ )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =[]
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE =int(parent_a[1] * 100 ) + 1
SCREAMING_SNAKE_CASE =10 if child_n >= 10 else child_n
for _ in range(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE =population_score[random.randint(0, lowerCAmelCase_ )][0]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =crossover(parent_a[0], lowerCAmelCase_ )
# Append new string to the population list.
pop.append(mutate(lowerCAmelCase_, lowerCAmelCase_ ) )
pop.append(mutate(lowerCAmelCase_, lowerCAmelCase_ ) )
return pop
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = True ):
"""simple docstring"""
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE =F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(lowerCAmelCase_ )
# Verify that the target contains no genes besides the ones inside genes variable.
SCREAMING_SNAKE_CASE =sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE =F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(lowerCAmelCase_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE =[]
for _ in range(lowerCAmelCase_ ):
population.append(''.join([random.choice(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) )] ) )
# Just some logs to know what the algorithms is doing.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowerCAmelCase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE =[evaluate(lowerCAmelCase_, lowerCAmelCase_ ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE =sorted(lowerCAmelCase_, key=lambda lowerCAmelCase_ : x[1], reverse=lowerCAmelCase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
SCREAMING_SNAKE_CASE =population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowerCAmelCase_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE =[
(item, score / len(lowerCAmelCase_ )) for item, score in population_score
]
# This is selection
for i in range(lowerCAmelCase_ ):
population.extend(select(population_score[int(lowerCAmelCase_ )], lowerCAmelCase_, lowerCAmelCase_ ) )
# Check if the population has already reached its maximum size and, if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(lowerCAmelCase_ ) > N_POPULATION:
break
if __name__ == "__main__":
_lowerCamelCase =(
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
_lowerCamelCase =list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase =basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 334 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
__UpperCAmelCase = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = torch.load(lowerCAmelCase_ , map_location='cpu' )
return sd
def lowercase__ ( __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple=rename_keys_prefix ):
'''simple docstring'''
UpperCAmelCase_ : int = OrderedDict()
UpperCAmelCase_ : int = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
UpperCAmelCase_ : str = key
for name_pair in rename_keys_prefix:
UpperCAmelCase_ : Dict = new_key.replace(name_pair[0] , name_pair[1] )
UpperCAmelCase_ : str = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
UpperCAmelCase_ : Dict = new_d['cls.predictions.bias']
return new_d
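# For example, the rename pairs above map a key such as
# "bert.bert.embeddings.word_embeddings.weight" to
# "visual_bert.embeddings.word_embeddings.weight" (key shown for illustration).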
@torch.no_grad()
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : int ):
'''simple docstring'''
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
# Get Config
if "pre" in checkpoint_path:
UpperCAmelCase_ : int = 'pretraining'
if "vcr" in checkpoint_path:
UpperCAmelCase_ : Tuple = {'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
UpperCAmelCase_ : Optional[int] = {'visual_embedding_dim': 2_048}
elif "vqa" in checkpoint_path:
UpperCAmelCase_ : List[Any] = {'visual_embedding_dim': 2_048}
elif "nlvr" in checkpoint_path:
UpperCAmelCase_ : Union[str, Any] = {'visual_embedding_dim': 1_024}
else:
raise NotImplementedError(F"No implementation found for `{checkpoint_path}`." )
else:
if "vcr" in checkpoint_path:
UpperCAmelCase_ : Tuple = {'visual_embedding_dim': 512}
UpperCAmelCase_ : int = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
UpperCAmelCase_ : str = {'visual_embedding_dim': 2_048}
UpperCAmelCase_ : List[Any] = 'vqa_advanced'
elif "vqa" in checkpoint_path:
UpperCAmelCase_ : Tuple = {'visual_embedding_dim': 2_048, 'num_labels': 3_129}
UpperCAmelCase_ : Union[str, Any] = 'vqa'
elif "nlvr" in checkpoint_path:
UpperCAmelCase_ : Optional[Any] = {
'visual_embedding_dim': 1_024,
'num_labels': 2,
}
UpperCAmelCase_ : List[Any] = 'nlvr'
UpperCAmelCase_ : Any = VisualBertConfig(**lowerCAmelCase_ )
# Load State Dict
UpperCAmelCase_ : Dict = load_state_dict(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = get_new_dict(lowerCAmelCase_ , lowerCAmelCase_ )
if model_type == "pretraining":
UpperCAmelCase_ : Optional[Any] = VisualBertForPreTraining(lowerCAmelCase_ )
elif model_type == "vqa":
UpperCAmelCase_ : Tuple = VisualBertForQuestionAnswering(lowerCAmelCase_ )
elif model_type == "nlvr":
UpperCAmelCase_ : int = VisualBertForVisualReasoning(lowerCAmelCase_ )
elif model_type == "multichoice":
UpperCAmelCase_ : str = VisualBertForMultipleChoice(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
# Save Checkpoints
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
__UpperCAmelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
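# Example invocation (script name and paths are placeholders):
#
#   python convert_visual_bert.py vqa_fine_tuned.th ./visualbert-vqa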
| 29 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# This checks that we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE =tempfile.mktemp()
with open(snake_case ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,snake_case )
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained(snake_case )
finally:
os.remove(snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,snake_case )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _lowerCAmelCase ( self : int ):
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _lowerCAmelCase ( cls : List[Any] ):
SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _lowerCAmelCase ( cls : Tuple ):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _lowerCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case ,repo_id='test-tokenizer' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def _lowerCAmelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def _lowerCAmelCase ( self : str ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =CustomTokenizer(snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained(snake_case )
bert_tokenizer.save_pretrained(snake_case )
SCREAMING_SNAKE_CASE =CustomTokenizerFast.from_pretrained(snake_case )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=snake_case ,trust_remote_code=snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] )
def _lowerCAmelCase ( self : Optional[Any] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
SCREAMING_SNAKE_CASE =Trie()
SCREAMING_SNAKE_CASE =trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(snake_case ,['AB', 'C'] )
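# Quick usage sketch for the Trie exercised above (a sketch, assuming the
# `Trie` class from `transformers.tokenization_utils`): added tokens are
# matched greedily and split out as standalone chunks.
#
#     from transformers.tokenization_utils import Trie
#     demo = Trie()
#     demo.add("[CLS]")
#     demo.add("[SEP]")
#     demo.split("[CLS] a sentence [SEP]")  # -> ["[CLS]", " a sentence ", "[SEP]"]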
| 334 | 0 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current stock price for `symbol` from Yahoo Finance India."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
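# Yahoo's markup changes frequently, so the CSS class used above is brittle.
# A more defensive variant (illustrative sketch, same class-string assumption)
# adds a request timeout and tolerates a missing node instead of raising:
def stock_price_safe(symbol: str = "AAPL"):
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    return div.find("span").text if div and div.find("span") else None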
| 111 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase =[
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
_lowerCamelCase =[
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
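# Illustration of what the renaming loop above does to a single checkpoint key:
#
#     "bert.bert.encoder.layer.0.output.dense.weight"
#         -> "visual_bert.encoder.layer.0.output.dense.weight"
#     (via the ("bert.bert", "visual_bert") pair in rename_keys_prefix)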
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
_lowerCamelCase =parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
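# Example invocation (sketch; the script file name and output directory are
# illustrative, but the checkpoint name must be one of ACCEPTABLE_CHECKPOINTS):
#
#     python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./converted_visual_bert_vqa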
| 334 | 0 |
'''simple docstring'''
_lowerCAmelCase = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
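# Every try/except block above follows the same soft-import pattern: when an
# optional backend is missing, the `dummy_*_objects` modules shadow the real
# classes with placeholders that raise a helpful error only on use. A minimal
# self-contained sketch of that idea (names illustrative, not the library's own):
class _DummyObject:
    """Placeholder that defers an ImportError until first use."""

    def __init__(self, name: str, backend: str) -> None:
        self._name, self._backend = name, backend

    def __call__(self, *args, **kwargs):
        raise ImportError(
            f"{self._name} requires the `{self._backend}` backend. Install it to use this class."
        )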
| 37 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    """Configuration class to store the configuration of an NLLB-MoE model."""

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
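# Usage sketch: `attribute_map` aliases generic names onto model-specific
# fields, so with the defaults above:
#
#     cfg = NllbMoeConfig()
#     cfg.hidden_size           # 1024, resolves to cfg.d_model
#     cfg.num_attention_heads   # 16, resolves to cfg.encoder_attention_heads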
| 334 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_snake_case : List[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_snake_case : Optional[int] = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_snake_case : int = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 284 |
from __future__ import annotations


def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    """
    Return the median of the merged contents of two arrays.

    >>> median_of_two_arrays([1, 3], [2])
    2
    >>> median_of_two_arrays([1, 2], [3, 4])
    2.5
    """
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
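# Complexity note: sorting makes the function above O((m + n) log(m + n)). For
# inputs that are already sorted, a two-pointer merge finds the median in
# O(m + n). Illustrative sketch, not part of the original module:
def median_of_two_sorted_arrays(a: list[float], b: list[float]) -> float:
    merged: list[float] = []
    i = j = 0
    while i < len(a) or j < len(b):
        if j >= len(b) or (i < len(a) and a[i] <= b[j]):
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    div, mod = divmod(len(merged), 2)
    return merged[div] if mod else (merged[div] + merged[div - 1]) / 2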
| 334 | 0 |
def is_power_of_two(number: int) -> bool:
    """
    Check whether `number` is a power of two using the bit trick
    n & (n - 1) == 0 (note that 0 also satisfies this identity).

    >>> is_power_of_two(1)
    True
    >>> is_power_of_two(4)
    True
    >>> is_power_of_two(6)
    False
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
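# Equivalent check via population count (`int.bit_count` is Python 3.10+).
# Illustrative alternative with the same result for number >= 1; unlike the
# bit trick above, it returns False for 0:
def is_power_of_two_popcount(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number.bit_count() == 1  # exactly one set bit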
| 230 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Transformer-XL model."""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
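# Quick demo sketch of the unusual property above:
#
#     cfg = TransfoXLConfig()
#     cfg.max_position_embeddings        # logs a notice and returns -1
#     cfg.max_position_embeddings = 512  # raises NotImplementedError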
| 334 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_lowercase = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_lowercase = {'''facebook/blenderbot-3B''': 1_28}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" Blenderbot tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the post-processor class
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 74 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class a_ :
"""simple docstring"""
def __init__( self : Optional[int] ,snake_case : Any ,snake_case : Dict=100 ,snake_case : List[Any]=13 ,snake_case : str=30 ,snake_case : List[str]=2 ,snake_case : List[Any]=3 ,snake_case : Tuple=True ,snake_case : Optional[Any]=True ,snake_case : int=32 ,snake_case : Tuple=4 ,snake_case : List[Any]=4 ,snake_case : Optional[Any]=37 ,snake_case : Optional[Any]="gelu" ,snake_case : Tuple=0.1 ,snake_case : Union[str, Any]=0.1 ,snake_case : List[Any]=10 ,snake_case : Tuple=0.02 ,snake_case : List[str]=3 ,snake_case : Any=None ,snake_case : int=[0, 1, 2, 3] ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =100
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =num_channels
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =scope
SCREAMING_SNAKE_CASE =out_indices
SCREAMING_SNAKE_CASE =num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE =num_patches + 1
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCAmelCase ( self : Dict ):
return BeitConfig(
vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=snake_case ,initializer_range=self.initializer_range ,out_indices=self.out_indices ,)
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Tuple ,snake_case : Optional[Any] ,snake_case : Union[str, Any] ,snake_case : Optional[int] ):
SCREAMING_SNAKE_CASE =BeitModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int] ,snake_case : Dict ,snake_case : Any ,snake_case : List[str] ):
SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : Any ,snake_case : str ,snake_case : Any ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.type_sequence_label_size
SCREAMING_SNAKE_CASE =BeitForImageClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE =1
SCREAMING_SNAKE_CASE =BeitForImageClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self : List[str] ,snake_case : Tuple ,snake_case : str ,snake_case : Optional[int] ,snake_case : int ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =config_and_inputs
SCREAMING_SNAKE_CASE ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =BeitModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,has_text_modality=snake_case ,hidden_size=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _lowerCAmelCase ( self : Union[str, Any] ):
pass
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
SCREAMING_SNAKE_CASE =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case ,nn.Linear ) )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE =['pixel_values']
self.assertListEqual(arg_names[:1] ,snake_case )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case )
def _lowerCAmelCase ( self : Any ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE =False
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.gradient_checkpointing_enable()
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =_config_zero_init(snake_case )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(config=snake_case )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@slow
def _lowerCAmelCase ( self : List[str] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =BeitModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : Tuple ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).pixel_values.to(snake_case )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE =torch.ones((1, 196) ,dtype=torch.bool ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(pixel_values=snake_case ,bool_masked_pos=snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(snake_case )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,snake_case ,atol=1e-2 ) )
@slow
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =281
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 21841) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =2396
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] ,device=snake_case ,)
else:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] ,device=snake_case ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,snake_case ,atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case ,target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,snake_case )
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case )
SCREAMING_SNAKE_CASE =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape ,snake_case )
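# Run sketch: the @slow integration tests above are gated by transformers'
# RUN_SLOW environment flag, e.g. (file path illustrative):
#
#     RUN_SLOW=1 python -m pytest tests/models/beit/test_modeling_beit.py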
| 334 | 0 |
"""Ternary search: locate a target in a sorted array in O(log3(N)) time."""
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear scan of array[left:right]; return the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; return the index or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over array[left..right]; return the index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
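# Quick sanity sketch: both variants agree on a small sorted list (with
# precision = 10 they immediately fall back to lin_search here).
#
#     ite_ternary_search([1, 3, 5, 7, 9], 7)        # -> 3
#     rec_ternary_search(0, 4, [1, 3, 5, 7, 9], 7)  # -> 3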
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found")
| 254 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Record only leaf modules (no submodules) plus conv/batch-norm layers.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
def __call__( self : List[str] ,snake_case : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
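# The Tracker above collects leaf modules through forward hooks. A minimal
# self-contained illustration of the same mechanism (illustrative sketch;
# `torch.nn` and `Tensor` come from this module's imports):
def trace_leaf_modules(model: nn.Module, x: Tensor) -> list:
    seen = []
    handles = [
        m.register_forward_hook(lambda mod, inputs, outputs: seen.append(mod))
        for m in model.modules()
        if len(list(m.children())) == 0
    ]
    model(x)  # hooks fire during the forward pass
    for handle in handles:
        handle.remove()
    return seen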
@dataclass
class a_ :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 1
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = field(default_factory=lowerCamelCase_ )
__UpperCAmelCase = True
    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda t: type(t) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda t: type(t) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class a_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ,snake_case : nn.Module ):
super().__init__()
SCREAMING_SNAKE_CASE =[]
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), f'Unexpected layer name {k}'
SCREAMING_SNAKE_CASE =len(snake_case ) + 1
feature_blocks.append((f'res{block_index}', v) )
SCREAMING_SNAKE_CASE =nn.ModuleDict(snake_case )
def _lowerCAmelCase ( self : Dict ,snake_case : Tensor ):
return get_trunk_forward_outputs(
snake_case ,out_feat_keys=snake_case ,feature_blocks=self._feature_blocks ,)
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[int] ,snake_case : str ):
SCREAMING_SNAKE_CASE =x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Optional[Any] ,snake_case : str ):
# default to timm!
if x not in self:
SCREAMING_SNAKE_CASE =self.convert_name_to_timm(snake_case )
SCREAMING_SNAKE_CASE =partial(lambda: (timm.create_model(snake_case ,pretrained=snake_case ).eval(), None) )
else:
SCREAMING_SNAKE_CASE =super().__getitem__(snake_case )
return val
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def __getitem__( self : int ,snake_case : str ):
if "seer" in x and "in1k" not in x:
SCREAMING_SNAKE_CASE =RegNetModel
else:
SCREAMING_SNAKE_CASE =RegNetForImageClassification
return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push( name, from_model_func, our_model_func, config, save_directory, push_to_hub = True, ):
    """simple docstring"""
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model , from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys )
        our_model.load_state_dict(to_state_dict )
    our_outputs = our_model(x, output_hidden_states=True )
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification ) else our_outputs.last_hidden_state
    )
    from_output = from_model(x )
    from_output = from_output[-1] if type(from_output ) is list else from_output
    # since we don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output, our_output ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message='Add model', use_temp_dir=True, )
        size = 224 if 'seer' not in name else 384
        # we can use the convnext image processor
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k', size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message='Add image processor', use_temp_dir=True, )
        print(F'Pushed {name}' )
def convert_weights_and_push( save_directory, model_name = None, push_to_hub = True ):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset' ) ), 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id )
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url, model_func ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory ), map_location='cpu' )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['classy_state_dict']['base_model']['model']
        state_dict = model_state_dict['trunk']
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map['regnet-y-320-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
    names_to_from_model_map['regnet-y-640-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
    names_to_from_model_map['regnet-y-1280-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
    names_to_from_model_map['regnet-y-10b-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
    # IN1K finetuned
    names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
    names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
    names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
    names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
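    # Example invocation (illustrative; the script filename is an assumption):
    #   python convert_regnet_to_pytorch.py --model_name regnet-y-040 \
    #       --pytorch_dump_folder_path ./regnet
    # Caveat: argparse's `type=bool` treats any non-empty string (even "False")
    # as truthy, so pass --push_to_hub "" if you want to disable pushing.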
| 334 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    """simple docstring"""
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"
    @property
    def column_mapping( self : Any ) -> Dict[str, str]:
        '''simple docstring'''
        return {self.text_column: "text", self.summary_column: "summary"}
| 184 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
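# A minimal sketch of the decorator showcased below (added for illustration):
# `find_executable_batch_size` re-runs the wrapped function with a halved batch
# size each time it raises a CUDA out-of-memory error, until training fits.
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size`; an OOM retries at 64, 32, ...
#
#   train()  # called with no arguments; the decorator injects batch_size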
_lowerCamelCase =16
_lowerCamelCase =32
def get_dataloaders( accelerator: Accelerator, batch_size: int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue', 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    return train_dataloader, eval_dataloader
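# A small illustration (not in the original script) of what collate_fn does:
# with pad_to_multiple_of=8 and a longest sequence of, say, 27 tokens, the
# batch is padded up to 32 columns, which keeps shapes tensor-core friendly
# for fp16/bf16.
#
#   batch = tokenizer.pad(features, padding='longest', pad_to_multiple_of=8, return_tensors='pt')
#   assert batch['input_ids'].shape[1] % 8 == 0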
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config, args ):
    """simple docstring"""
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None ) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue', 'mrpc' )
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size )
    def inner_training_loop(batch_size ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr )
        train_dataloader , eval_dataloader = get_dataloaders(accelerator, batch_size )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader ) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions , references = accelerator.gather_for_metrics((predictions, batch['labels']) )
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F'epoch {epoch}:', eval_metric )
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args )
if __name__ == "__main__":
main()
| 334 | 0 |
'''simple docstring'''
import math
def malus_law( initial_intensity : float , angle : float ) -> float:
    """simple docstring"""
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative' )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
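# Worked example (added for illustration): a perfect polarizer at 60 degrees
# passes cos^2(60°) = 25% of the light, so malus_law(100.0, 60.0) ~= 25.0.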
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 53 |
def reverse_long_words( sentence : str ) -> str:
    """simple docstring"""
    return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 334 | 0 |
from __future__ import annotations
import numpy as np
def relu( vector : list[float] ) -> np.ndarray:
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 52 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_lowerCamelCase ="sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro ( TestCasePlus ):
    """simple docstring"""
    def setUp( self : Union[str, Any] ):
        super().setUp()
        data_cached = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,extract_compressed_file=True ,)
        self.data_dir = f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
    def test_model_download( self : Optional[int] ):
        # warms up the cache so that later tests don't include download time
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script( self : Tuple ):
        env_vars_to_replace = {
            '$MAX_LEN': 64,
            '$BS': 64,
            '$GAS': 1,
            '$ENRO_DIR': self.data_dir,
            'facebook/mbart-large-cc25': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '--learning_rate=3e-5': '--learning_rate 3e-4',
            '--num_train_epochs 6': '--num_train_epochs 1',
        }
        # Clean up bash script
        bash_script = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
        bash_script = bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k ,str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ['finetune.py'] + bash_script.split() + args
        with patch.object(sys ,'argv' ,testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationModule.add_model_specific_args(parser ,os.getcwd() )
            args = parser.parse_args()
            model = main(args )
        # Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
        self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,float )
self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] ,17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith('.ckpt' )][0]
        full_path = os.path.join(args.output_dir ,ckpt_path )
        ckpt = torch.load(full_path ,map_location='cpu' )
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class TestDistilMarianNoTeacher ( TestCasePlus ):
    """simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script( self : Optional[Any] ):
        data_dir = f'{self.test_file_dir_str}/test_data/wmt_en_ro'
        env_vars_to_replace = {
            '--fp16_opt_level=O1': '',
            '$MAX_LEN': 128,
            '$BS': 16,
            '$GAS': 1,
            '$ENRO_DIR': data_dir,
            '$m': 'sshleifer/student_marian_en_ro_6_1',
            'val_check_interval=0.25': 'val_check_interval=1.0',
        }
        # Clean up bash script
        bash_script = (
            (self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
        )
        bash_script = bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
        bash_script = bash_script.replace('--fp16 ' ,' ' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k ,str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace('--fp16' ,'' )
        epochs = 6
        testargs = (
            ['distillation.py']
            + bash_script.split()
            + [
                f'--output_dir={output_dir}',
                '--gpus=1',
                '--learning_rate=1e-3',
                f'--num_train_epochs={epochs}',
                '--warmup_steps=10',
                '--val_check_interval=1.0',
                '--do_predict',
            ]
        )
        with patch.object(sys ,'argv' ,testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationDistiller.add_model_specific_args(parser ,os.getcwd() )
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args )
        # Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
        assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # BLEU must improve, else the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,float )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith('.ckpt' )][0]
        full_path = os.path.join(args.output_dir ,ckpt_path )
        ckpt = torch.load(full_path ,map_location='cpu' )
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 334 | 0 |
'''simple docstring'''
import math
import os
import sys
def read_file_binary( file_path: str ) -> str:
    result = ''
    try:
        with open(file_path , 'rb' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F"""{dat:08b}"""
            result += curr_byte
        return result
    except OSError:
        print('File not accessible' )
        sys.exit()
def add_key_to_lexicon( lexicon , curr_string , index , last_match_id ):
    lexicon.pop(curr_string )
    lexicon[curr_string + '0'] = last_match_id
    if math.log2(index ).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + '1'] = bin(index )[2:]
def compress_data( data_bits: str ) -> str:
    lexicon = {'0': '0', '1': '1'}
    result , curr_string = '', ''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon , curr_string , index , last_match_id )
        index += 1
        curr_string = ''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
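# A short illustration (added, not part of the original module): compress_data
# walks the bit string, emits the code of the longest prefix already in the
# lexicon, then grows the lexicon via add_key_to_lexicon. For example:
#
#   compress_data('0')     # -> '0' (single, already-known symbol)
#   compress_data('0010')  # emits one code per matched prefix as the lexicon grows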
def add_file_length( source_path , compressed: str ) -> str:
    file_length = os.path.getsize(source_path )
    file_length_binary = bin(file_length )[2:]
    length_length = len(file_length_binary )
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary( file_path , to_write: str ) -> None:
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            # pad the last (possibly partial) byte with a 1 followed by 0s
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()
def compress( source_path , destination_path ) -> None:
    data_bits = read_file_binary(source_path )
    compressed = compress_data(data_bits )
    compressed = add_file_length(source_path , compressed )
    write_file_binary(destination_path , compressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 198 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_lowerCamelCase ={
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = 'blip_2_vision_model'
    def __init__( self : List[Any] ,hidden_size : List[Any]=1408 ,intermediate_size : Optional[Any]=6144 ,num_hidden_layers : Optional[int]=39 ,num_attention_heads : Optional[int]=16 ,image_size : Optional[Any]=224 ,patch_size : Tuple=14 ,hidden_act : Optional[Any]="gelu" ,layer_norm_eps : Union[str, Any]=0.00_001 ,attention_dropout : Dict=0.0 ,initializer_range : Union[str, Any]=1e-10 ,qkv_bias : int=True ,**kwargs : str ,):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls : Dict ,pretrained_model_name_or_path : Union[str, os.PathLike] ,**kwargs : str ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict ,**kwargs )
class BlipaQFormerConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = 'blip_2_qformer'
    def __init__( self : Any ,vocab_size : Dict=30522 ,hidden_size : int=768 ,num_hidden_layers : List[Any]=12 ,num_attention_heads : List[str]=12 ,intermediate_size : Optional[Any]=3072 ,hidden_act : str="gelu" ,hidden_dropout_prob : Optional[Any]=0.1 ,attention_probs_dropout_prob : Union[str, Any]=0.1 ,max_position_embeddings : Optional[Any]=512 ,initializer_range : List[Any]=0.02 ,layer_norm_eps : List[str]=1e-12 ,pad_token_id : Tuple=0 ,position_embedding_type : Union[str, Any]="absolute" ,cross_attention_frequency : List[Any]=2 ,encoder_hidden_size : List[str]=1408 ,**kwargs : Optional[Any] ,):
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained( cls : List[Any] ,pretrained_model_name_or_path : Union[str, os.PathLike] ,**kwargs : Dict ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict ,**kwargs )
class BlipaConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = 'blip-2'
    is_composition = True
    def __init__( self : int ,vision_config : Dict=None ,qformer_config : Tuple=None ,text_config : str=None ,num_query_tokens : Union[str, Any]=32 ,**kwargs : int ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls : Union[str, Any] ,vision_config : BlipaVisionConfig ,qformer_config : BlipaQFormerConfig ,text_config : PretrainedConfig ,**kwargs : Any ,):
        return cls(
            vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**kwargs ,)
    def to_dict( self : Optional[Any] ):
        output = copy.deepcopy(self.__dict__ )
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
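# A usage sketch (added for illustration, using the classes above): the
# composite config nests three sub-configs into one serializable object, and
# to_dict() re-nests their dicts under 'vision_config' / 'qformer_config' /
# 'text_config'.
#
#   config = BlipaConfig.from_vision_qformer_text_configs(
#       BlipaVisionConfig(), BlipaQFormerConfig(), CONFIG_MAPPING['opt']()
#   )
#   assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size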
| 334 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_encoder: bool = field(default=False , metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds: bool = field(default=False , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task: Optional[str] = field(
        default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
    max_source_length: Optional[int] = field(
        default=1_0_2_4 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    max_target_length: Optional[int] = field(
        default=1_2_8 , metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=1_4_2 , metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=1_4_2 , metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
    n_val: Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
    n_test: Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
    src_lang: Optional[str] = field(default=None , metadata={'help': 'Source language id for translation.'} )
    tgt_lang: Optional[str] = field(default=None , metadata={'help': 'Target language id for translation.'} )
    eval_beams: Optional[int] = field(default=None , metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
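# A minimal sketch (added for illustration) of how these dataclasses are
# consumed -- mirroring main() below, HfArgumentParser turns every field into a
# command-line flag and returns one populated instance per dataclass:
#
#   parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))
#   model_args, data_args, training_args = parser.parse_args_into_dataclasses()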
def handle_metrics( split : str , metrics : Dict , output_dir : str ):
    '''simple docstring'''
    logger.info(F"***** {split} metrics *****" )
    for key in sorted(metrics.keys() ):
        logger.info(F"  {key} = {metrics[key]}" )
    save_json(metrics , os.path.join(output_dir , F"{split}_results.json" ) )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s' , training_args )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info('*** Train ***' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['train_n_objs'] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('train' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(metric_key_prefix='val' )
        metrics['val_n_objs'] = data_args.n_val
        metrics['val_loss'] = round(metrics['val_loss'] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('val' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
    if training_args.do_predict:
        logger.info('*** Predict ***' )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='test' )
        metrics = test_output.metrics
        metrics['test_n_objs'] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['test_loss'] = round(metrics['test_loss'] , 4 )
            handle_metrics('test' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , 'all_results.json' ) )
    return all_metrics
def _mp_fn( index : Optional[int] ):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 29 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase ="\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase ="\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase ="\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve ( datasets.Metric ):
    """simple docstring"""
    def _info( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
    def _compute( self : Union[str, Any] ,predictions : Optional[int] ,references : str ,p_features : List[str]=None ,q_features : str=None ,p_tokens : int=None ,q_tokens : Union[str, Any]=None ,num_buckets : Optional[int]="auto" ,pca_max_data : List[str]=-1 ,kmeans_explained_var : Union[str, Any]=0.9 ,kmeans_num_redo : Tuple=5 ,kmeans_max_iter : Union[str, Any]=500 ,featurize_model_name : Union[str, Any]="gpt2-large" ,device_id : Union[str, Any]=-1 ,max_text_length : Optional[Any]=1024 ,divergence_curve_discretization_size : Optional[Any]=25 ,mauve_scaling_factor : List[str]=5 ,verbose : List[str]=True ,seed : Optional[Any]=25 ,):
        out = compute_mauve(
            p_text=predictions ,q_text=references ,p_features=p_features ,q_features=q_features ,p_tokens=p_tokens ,q_tokens=q_tokens ,num_buckets=num_buckets ,pca_max_data=pca_max_data ,kmeans_explained_var=kmeans_explained_var ,kmeans_num_redo=kmeans_num_redo ,kmeans_max_iter=kmeans_max_iter ,featurize_model_name=featurize_model_name ,device_id=device_id ,max_text_length=max_text_length ,divergence_curve_discretization_size=divergence_curve_discretization_size ,mauve_scaling_factor=mauve_scaling_factor ,verbose=verbose ,seed=seed ,)
        return out
| 334 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream) -> numpy.uint32:
    # read a big-endian uint32 from the byte stream
    dt = numpy.dtype(numpy.uint32).newbyteorder(""">""")
    return numpy.frombuffer(bytestream.read(4) , dtype=dt)[0]
@deprecated(None , """Please use tf.data to implement this functionality.""")
def _extract_images(f) -> numpy.ndarray:
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("""Extracting""" , f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                """Invalid magic number %d in MNIST image file: %s""" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf , dtype=numpy.uint8)
        data = data.reshape(num_images , rows , cols , 1)
        return data
@deprecated(None , """Please use tf.one_hot on tensors.""")
def _dense_to_one_hot(labels_dense , num_classes) -> numpy.ndarray:
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
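# Worked example (added for illustration): for labels [1, 3] and num_classes=4,
# index_offset is [0, 4], so flat positions 1 and 7 of the 2x4 zero array are set:
#
#   _dense_to_one_hot(numpy.array([1, 3]), 4)
#   # -> [[0., 1., 0., 0.],
#   #     [0., 0., 0., 1.]]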
@deprecated(None , """Please use tf.data to implement this functionality.""")
def _extract_labels(f , one_hot=False , num_classes=10) -> numpy.ndarray:
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("""Extracting""" , f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                """Invalid magic number %d in MNIST label file: %s""" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf , dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels , num_classes)
        return labels
class _DataSet :
    '''simple docstring'''
    @deprecated(
        None , """Please use alternatives such as official/mnist/_DataSet.py"""
        """ from tensorflow/models.""" , )
    def __init__( self : Union[str, Any] , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        seed1 , seed2 = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2 )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
@property
def UpperCAmelCase__ ( self : str ):
return self._images
@property
def UpperCAmelCase__ ( self : str ):
return self._labels
@property
def UpperCAmelCase__ ( self : Tuple ):
return self._num_examples
@property
def UpperCAmelCase__ ( self : Dict ):
return self._epochs_completed
def UpperCAmelCase__ ( self : Union[str, Any] , A : int , A : Tuple=False , A : Tuple=True ):
if fake_data:
__snake_case: Tuple = [1] * 784
__snake_case: str = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(A )],
[fake_label for _ in range(A )],
)
__snake_case: Tuple = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__snake_case: str = numpy.arange(self._num_examples )
numpy.random.shuffle(A )
__snake_case: Dict = self.images[perma]
__snake_case: Dict = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__snake_case: Tuple = self._num_examples - start
__snake_case: Union[str, Any] = self._images[start : self._num_examples]
__snake_case: int = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__snake_case: Any = numpy.arange(self._num_examples )
numpy.random.shuffle(A )
__snake_case: Optional[int] = self.images[perm]
__snake_case: List[str] = self.labels[perm]
# Start next epoch
__snake_case: Union[str, Any] = 0
__snake_case: List[str] = batch_size - rest_num_examples
__snake_case: int = self._index_in_epoch
__snake_case: Optional[int] = self._images[start:end]
__snake_case: Any = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
__snake_case: List[Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download `source_url` into `work_directory` unless it is already there."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
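# Usage sketch (an illustration, not part of the original module; the cache
# directory "/tmp/mnist_data" is a hypothetical choice):
#
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5000)
#   images, labels = mnist.train.next_batch(32)
#   print(images.shape, labels.shape)  # (32, 784) and (32, 10) with reshape=True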
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViT MAE model."""

    model_type = "vit_mae"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
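# Minimal usage sketch (an illustration, not part of the original file):
#
#   config = ViTMAEConfig(mask_ratio=0.9)   # override the MAE masking ratio
#   print(config.hidden_size)               # 768 by default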
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build, simulate and measure an n-qubit quantum Fourier transform circuit."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        # Hadamard on the highest remaining qubit, then controlled phase rotations.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)
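# Illustrative helper (an addition, not part of the original file): on the
# default all-|0> input, the QFT output is a uniform superposition, so each of
# the 2**n bitstrings should receive roughly shots / 2**n of the 10000 counts.
def _print_count_distribution(number_of_qubits: int = 2) -> None:
    counts = quantum_fourier_transform(number_of_qubits)
    expected = 10000 / 2**number_of_qubits
    for bitstring in sorted(counts):
        print(bitstring, counts[bitstring], "expected ~", expected)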
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
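# How the lazy pattern above behaves (an explanatory note, not original code):
# at import time only `_import_structure` is built; the torch-backed classes are
# materialized the first time an attribute is accessed through the `_LazyModule`
# proxy, e.g.:
#
#   from transformers.models.ernie import ErnieModel  # triggers the real import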
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value('bool'), type=Value('int64')))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value('int32')))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(['foo', 'bar'], type=Value('int64')))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value('int32')))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(['foo', 'bar'], try_type=Value('int64')))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), 'int64')))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), 'int64'))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(['foo', 'bar'], type=Array2D((1, 3), 'int64')))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), 'int64')))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), 'int64'))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(['foo', 'bar'], try_type=Array2D((1, 3), 'int64')))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            'datasets.arrow_writer.cast_to_python_objects', side_effect=cast_to_python_objects) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{'path': None, 'bytes': b'image_bytes'}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn('optimize_list_casting', kwargs)
            self.assertFalse(kwargs['optimize_list_casting'])
def _check_output(output, expected_num_chunks: int):
    """Shared assertion helper used by the writer tests below."""
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10])
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}])
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1})
        writer.write({'col_1': 'bar', 'col_2': 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=lowerCAmelCase_, features=lowerCAmelCase_ ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pa.ipc.open_stream(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =f.read_all()
SCREAMING_SNAKE_CASE =pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCAmelCase_ )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=[1, 2] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({'col_1': 'foo', 'col_2': 1}, key=10 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=10 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_, hash_salt='split_name', check_duplicates=lowerCAmelCase_, ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1}, key=1 )
writer.write({'col_1': 'bar', 'col_2': 2}, key=2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
'fields', [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE =pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_, schema=lowerCAmelCase_, writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case__ ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE ={'col_1': pa.string(), 'col_2': pa.intaa()}
SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase_, 'test.arrow' )
with ArrowWriter(path=lowerCAmelCase_, schema=pa.schema(lowerCAmelCase_ ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowerCAmelCase_, metadata=writer._schema.metadata )
_check_output(lowerCAmelCase_, 1 )
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
if pa.types.is_list(lowerCAmelCase_ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
if isinstance(lst[0], lowerCAmelCase_ ):
change_first_primitive_element_in_list(lst[0], lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE =value
@pytest.mark.parametrize('optimized_int_type, expected_dtype', [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(TypedSequence(lowerCAmelCase_, optimized_int_type=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype', [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
], )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE =copy.deepcopy(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCAmelCase_, lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa.array(OptimizedTypedSequence(lowerCAmelCase_, col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=lowerCAmelCase_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ='mock://dataset-train.arrow'
with ArrowWriter(path=lowerCAmelCase_, storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs, type(lowerCAmelCase_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCAmelCase_ )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(stream=lowerCAmelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files', [False, True] )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
import PIL.Image
SCREAMING_SNAKE_CASE =str(tmp_path / 'test_image_rgb.jpg' )
PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(lowerCAmelCase_, format='png' )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCAmelCase_, features=Features({'image': Image()} ), embed_local_files=lowerCAmelCase_ ) as writer:
writer.write({'image': image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE =pq.read_table(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'], lowerCAmelCase_ )
with open(lowerCAmelCase_, 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =pa.schema([pa.field('col_1', pa.string(), nullable=lowerCAmelCase_ )] )
SCREAMING_SNAKE_CASE =pa.BufferOutputStream()
with ArrowWriter(stream=lowerCAmelCase_ ) as writer:
writer._build_writer(inferred_schema=lowerCAmelCase_ )
assert writer._schema == pa.schema([pa.field('col_1', pa.string() )] )
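# Standalone sketch of the API exercised by the tests above (illustrative;
# the file name "data.arrow" is a hypothetical choice):
#
#   with ArrowWriter(path="data.arrow") as writer:
#       writer.write({"col_1": "foo", "col_2": 1})
#       writer.write({"col_1": "bar", "col_2": 2})
#       num_examples, num_bytes = writer.finalize()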
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.')
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy')
        pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to('cuda')
        prompt = 'Spiderman is surfing'
        generator = torch.Generator(device='cpu').manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type='pt').frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy')
        pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
        pipe = pipe.to('cuda')
        prompt = 'Spiderman is surfing'
        generator = torch.Generator(device='cpu').manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type='pt').frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
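# Minimal inference sketch mirroring the slow tests above (illustrative; the
# checkpoint id comes from the tests, the rest is an assumed typical setup):
#
#   pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
#   frames = pipe("Spiderman is surfing", num_inference_steps=25, output_type="pt").frames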
def or_gate(input_1: int, input_2: int) -> int:
    """OR gate: returns 1 if at least one of the inputs is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
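# Truth table for reference (follows directly from the implementation above):
#   A B | A OR B
#   0 0 |   0
#   0 1 |   1
#   1 0 |   1
#   1 1 |   1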
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, 'rb') as f:
        im = Image.open(f)
        return im.convert('RGB')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={
            'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
        }, )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'} )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )

    def __post_init__(self) -> None:
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                'You must specify either a dataset name from the hub or a train and/or validation directory.' )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default='google/vit-base-patch16-224-in21k', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}, )
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)}, )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'}, )
def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    labels = torch.tensor([example['labels'] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_image_classification', model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)], )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f' distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}')
    logger.info(f'Training/evaluation parameters {training_args}')
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.')
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='image-classification', use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['train'] = os.path.join(data_args.train_dir, '**')
        if data_args.validation_dir is not None:
            data_files['validation'] = os.path.join(data_args.validation_dir, '**')
        dataset = load_dataset(
            'imagefolder', data_files=data_files, cache_dir=model_args.cache_dir, task='image-classification', )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset['train'].train_test_split(data_args.train_val_split)
        dataset['train'] = split['train']
        dataset['validation'] = split['test']
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['train'].features['labels'].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy')

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task='image-classification', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size['shortest_edge']
    else:
        size = (image_processor.size['height'], image_processor.size['width'])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ])
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ])

    def train_transforms(example_batch):
        example_batch['pixel_values'] = [
            _train_transforms(pil_img.convert('RGB')) for pil_img in example_batch['image']
        ]
        return example_batch

    def val_transforms(example_batch):
        example_batch['pixel_values'] = [_val_transforms(pil_img.convert('RGB')) for pil_img in example_batch['image']]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError('--do_train requires a train dataset')
        if data_args.max_train_samples is not None:
            dataset['train'] = (
                dataset['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            dataset['validation'] = (
                dataset['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initalize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset['train'] if training_args.do_train else None, eval_dataset=dataset['validation'] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'image-classification',
        'dataset': data_args.dataset_name,
        'tags': ['image-classification', 'vision'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    main()
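# Example invocation (illustrative; the dataset name, output path and script
# filename are assumptions, not taken from this file):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-beans \
#       --do_train --do_eval \
#       --remove_unused_columns False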
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    """Read a plain-text vocabulary, one token per line."""
    with open(vocab_file, 'r') as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs, ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
        with open(vocab_file, 'w') as f:
            f.write('\n'.join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
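# Usage sketch (illustrative; "vocab.txt" is a hypothetical local path whose
# lines are the tokens, including the special tokens named above):
#
#   tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#   ids = tokenizer("MKTAYIAKQR")["input_ids"]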
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

SPIECE_UNDERLINE = '▁'
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs, ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
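# Usage sketch (illustrative; loading from the "camembert-base" checkpoint
# referenced in the pretrained maps above):
#
#   tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#   tokenizer("J'aime le camembert !")["input_ids"]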
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class Tracker:
    """Records leaf modules (conv/batchnorm) seen during a forward pass."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies weights from a source module to a destination module, layer by layer."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: list = field(default_factory=list)
    dest_skip: list = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.')
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}')
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f'Converting {name}...')
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f'resnet{"-".join(name.split("resnet"))}'
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(f'Pushed {checkpoint_name}')
def convert_weights_and_push( save_directory, model_name = None, push_to_hub = True ):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id )
    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
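# Example invocation (hypothetical script and path names):
#   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./resnet50-converted
# Note: `type=bool` on --push_to_hub means any non-empty string parses as True; an explicit
# string-to-bool converter would be needed to actually disable pushing from the CLI.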
| 334 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
A : Any = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
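# Shape sketch for the qkv split above (illustrative; assume hidden_size = 4):
#   in_proj_weight has shape (3 * 4, 4) = (12, 4); rows 0:4 become the query projection,
#   rows 4:8 the key projection, and rows 8:12 (i.e. [-4:]) the value projection.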
def remove_classification_head_( state_dict ):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny" ):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small" ):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small" ):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base" ):
            pass
        elif vit_name[4:].startswith("large" ):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge" ):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
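# Example invocation (hypothetical script and output path names):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base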
| 184 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs( token, num_runs=7 ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = F'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url, headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs( token ):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts( artifact_names, output_dir, token ):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token )
def get_last_daily_ci_reports( artifact_names, output_dir, token ):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, F'{artifact_name}.zip' )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8' )
    return results
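# Hypothetical usage (requires a GitHub token with read access to Actions artifacts;
# the artifact name below is illustrative):
#   reports = get_last_daily_ci_reports(
#       artifact_names=["run_all_tests_gpu_test_reports"],
#       output_dir="ci_artifacts",
#       token=os.environ["GITHUB_TOKEN"],
#   )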
| 334 | 0 |
'''simple docstring'''
def depth_first_search( grid , row , col , visit ):
    """simple docstring"""
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
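# Worked example (illustrative 3x3 grid; 1 marks a blocked cell):
#   grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
#   depth_first_search(grid, 0, 0, set())  # -> 2 simple paths around the blocked center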
| 53 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    """simple docstring"""
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=False ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=64 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,q_groups=2 ,k_groups=2 ,v_groups=2 ,post_attention_groups=2 ,intermediate_groups=4 ,output_groups=1 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
    def create_and_check_squeezebert_model( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_squeezebert_for_masked_lm( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,start_positions=sequence_labels ,end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def create_and_check_squeezebert_for_sequence_classification( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_squeezebert_for_token_classification( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_squeezebert_for_multiple_choice( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids ,attention_mask=multiple_choice_input_mask ,labels=choice_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self ):
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=SqueezeBertConfig ,dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_squeezebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_inference_classification_head( self ):
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape ,expected_shape )
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]] )
        self.assertTrue(torch.allclose(output ,expected_tensor ,atol=1e-4 ) )
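# To run just this module inside a transformers checkout (hypothetical repository path):
#   python -m pytest tests/models/squeezebert/test_modeling_squeezebert.py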
| 334 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest( unittest.TestCase ):
    @slow
    def test_bert_from_pretrained( self ):
        '''simple docstring'''
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config , BertConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model , FlaxBertModel )
    @slow
    def test_roberta_from_pretrained( self ):
        '''simple docstring'''
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config , BertConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model , FlaxRobertaModel )
    @slow
    def test_bert_jax_jit( self ):
        '''simple docstring'''
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxBertModel.from_pretrained(model_name )
            tokens = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
    @slow
    def test_roberta_jax_jit( self ):
        '''simple docstring'''
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxRobertaModel.from_pretrained(model_name )
            tokens = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
    def test_repo_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , "bert-base is not a local folder and is not a valid model identifier" ):
            _ = FlaxAutoModel.from_pretrained("bert-base" )
    def test_revision_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="aaaaaa" )
    def test_model_file_missing_error( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
    def test_model_from_pt_suggestion( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(EnvironmentError , "Use `from_pt=True` to load this model" ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
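# Note: `jax.jit` traces and compiles `eval` once per input shape, and the
# `block_until_ready()` calls above force JAX's asynchronous dispatch to finish,
# so failures surface inside the test rather than at a later device sync.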
| 52 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin( FeatureExtractionSavingTestMixin ):
    """simple docstring"""
    # to overwrite at feature-extractor-specific tests
    feat_extract_tester = None
    feature_extraction_class = None
    @property
    def feat_extract_dict( self ):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feat_extract ,'feature_size' ) )
        self.assertTrue(hasattr(feat_extract ,'sampling_rate' ) )
        self.assertTrue(hasattr(feat_extract ,'padding_value' ) )
    def test_batch_feature( self ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs ,processed_features[input_name] ) ) )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        processed_features = BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    @require_torch
    def test_batch_feature_pt( self ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    @require_tf
    def test_batch_feature_tf( self ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding( self ,numpify=False ):
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True
        def _inputs_are_equal(input_1 ,input_2 ):
            if len(input_1 ) != len(input_2 ):
                return False
            for input_slice_1, input_slice_2 in zip(input_1 ,input_2 ):
                if not np.allclose(np.asarray(input_slice_1 ) ,np.asarray(input_slice_2 ) ,atol=1e-3 ):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features ,padding=False )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features ,padding='longest' )
        input_2 = input_2[input_name]
        input_3 = feat_extract.pad(processed_features ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
        input_3 = input_3[input_name]
        input_4 = feat_extract.pad(processed_features ,padding='longest' ,return_tensors='np' )
        input_4 = input_4[input_name]
        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features ,padding='max_length' )[input_name]
        input_5 = feat_extract.pad(
            processed_features ,padding='max_length' ,max_length=pad_max_length ,return_tensors='np' )
        input_5 = input_5[input_name]
        self.assertFalse(_inputs_have_equal_length(input_1 ) )
        self.assertTrue(_inputs_have_equal_length(input_2 ) )
        self.assertTrue(_inputs_have_equal_length(input_3 ) )
        self.assertTrue(_inputs_are_equal(input_2 ,input_3 ) )
        self.assertTrue(len(input_1[0] ) == pad_min_length )
        self.assertTrue(len(input_1[1] ) == pad_min_length + pad_diff )
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0] )) )
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length) )
        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size )
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features ,pad_to_multiple_of=10 )
        input_6 = input_6[input_name]
        input_7 = feat_extract.pad(processed_features ,padding='longest' ,pad_to_multiple_of=10 )
        input_7 = input_7[input_name]
        input_8 = feat_extract.pad(
            processed_features ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=pad_max_length )
        input_8 = input_8[input_name]
        input_9 = feat_extract.pad(
            processed_features ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=pad_max_length ,return_tensors='np' ,)
        input_9 = input_9[input_name]
        self.assertTrue(all(len(x ) % 10 == 0 for x in input_6 ) )
        self.assertTrue(_inputs_are_equal(input_6 ,input_7 ) )
        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x ) == expected_mult_pad_length for x in input_8 ) )
        self.assertEqual(input_9.shape[:2] ,(batch_size, expected_mult_pad_length) )
        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size )
        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
            < 1e-3 )
        self.assertTrue(
            abs(
                np.asarray(input_5[1] )[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
            < 1e-3 )
        self.assertTrue(
            abs(
                np.asarray(input_5[2] )[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
            < 1e-3 )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
            < 1e-3 )
    def _check_truncation( self ,numpify=False ):
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True
        def _inputs_are_equal(input_1 ,input_2 ):
            if len(input_1 ) != len(input_2 ):
                return False
            for input_slice_1, input_slice_2 in zip(input_1 ,input_2 ):
                if not np.allclose(np.asarray(input_slice_1 ) ,np.asarray(input_slice_2 ) ,atol=1e-3 ):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=True )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
        input_2 = input_2[input_name]
        self.assertTrue(_inputs_have_equal_length(input_1 ) )
        self.assertFalse(_inputs_have_equal_length(input_2 ) )
        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=True ,)
        input_3 = input_3[input_name]
        input_4 = feat_extract.pad(
            processed_features ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
        input_4 = input_4[input_name]
        self.assertTrue(_inputs_have_equal_length(input_3 ) )
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4 ) )
        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=True ,return_tensors='np' ,)
        input_5 = input_5[input_name]
        input_6 = feat_extract.pad(
            processed_features ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=True )
        input_6 = input_6[input_name]
        input_7 = feat_extract.pad(
            processed_features ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
        input_7 = input_7[input_name]
        self.assertTrue(input_5.shape[1] == len(speech_inputs[1] ) )
        self.assertTrue(_inputs_have_equal_length(input_5 ) )
        self.assertTrue(_inputs_have_equal_length(input_6 ) )
        self.assertTrue(_inputs_are_equal(input_5 ,input_6 ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7 ) )
        self.assertTrue(len(input_7[-1] ) == len(speech_inputs[-1] ) )
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features ,truncation=True )[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features ,padding='longest' ,truncation=True )[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features ,padding='longest' ,truncation=True )[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features ,padding='max_length' ,truncation=True )[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=pad_to_multiple_of ,truncation=True ,)
        input_8 = input_8[input_name]
        input_9 = feat_extract.pad(
            processed_features ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=pad_to_multiple_of ,)
        input_9 = input_9[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0] )
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_8[0] ) == expected_length )
        self.assertTrue(_inputs_have_equal_length(input_8 ) )
        self.assertFalse(_inputs_have_equal_length(input_9 ) )
    def test_padding_from_list( self ):
        self._check_padding(numpify=False )
    def test_padding_from_array( self ):
        self._check_padding(numpify=True )
    def test_truncation_from_list( self ):
        self._check_truncation(numpify=False )
    def test_truncation_from_array( self ):
        self._check_truncation(numpify=True )
    @require_torch
    def test_padding_accepts_tensors_pt( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        input_np = feat_extract.pad(processed_features ,padding='longest' ,return_tensors='np' )[input_name]
        input_pt = feat_extract.pad(processed_features ,padding='longest' ,return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
    @require_tf
    def test_padding_accepts_tensors_tf( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        input_np = feat_extract.pad(processed_features ,padding='longest' ,return_tensors='np' )[input_name]
        input_tf = feat_extract.pad(processed_features ,padding='longest' ,return_tensors='tf' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1e-2 )
    def test_attention_mask( self ):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        processed = feat_extract.pad(processed ,padding='longest' ,return_tensors='np' )
        self.assertIn('attention_mask' ,processed )
        self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,input_lengths )
    def test_attention_mask_with_truncation( self ):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        max_length = min(input_lengths )
        processed_pad = feat_extract.pad(
            processed ,padding='max_length' ,max_length=max_length ,truncation=True ,return_tensors='np' )
        self.assertIn('attention_mask' ,processed_pad )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
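# The `pad_to_multiple_of` rounding exercised by `_check_padding` above, as a one-liner
# (illustrative):
#   expected = length if length % multiple == 0 else ((length // multiple) + 1) * multiple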
| 334 | 0 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration
REQUIRE_FAIRSEQ = {"""comet"""}
_has_fairseq = importlib.util.find_spec("""fairseq""") is not None
UNSUPPORTED_ON_WINDOWS = {"""code_eval"""}
_on_windows = os.name == """nt"""
REQUIRE_TRANSFORMERS = {"""bertscore""", """frugalscore""", """perplexity"""}
_has_transformers = importlib.util.find_spec("""transformers""") is not None
def skip_if_metric_requires_fairseq(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('''"test requires Fairseq"''' )
        else:
            test_case(self , metric_name )
    return wrapper
def skip_if_metric_requires_transformers(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('''"test requires transformers"''' )
        else:
            test_case(self , metric_name )
    return wrapper
def skip_on_windows_if_not_windows_compatible(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('''"test not supported on Windows"''' )
        else:
            test_case(self , metric_name )
    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq , skip_if_metric_requires_transformers , skip_on_windows_if_not_windows_compatible )
@local
class LocalMetricTest( parameterized.TestCase ):
    '''simple docstring'''
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
    @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
    def test_load_metric( self , metric_name ):
        doctest_string = '''[...]'''
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , metric_name ) ).module_path )
        metric = datasets.load.import_main_class(metric_module.__name__ , dataset=False )
        # check parameters
        parameters = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) )  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @slow
    def test_load_real_metric( self , metric_name ):
        doctest_string = '''[...]'''
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , metric_name ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @contextmanager
    def patch_intensive_calls( self , metric_name , module_name ):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name ):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics( self ):
        def load_local_metric(metric_name , *args , **kwargs ):
            return load_metric(os.path.join('''metrics''' , metric_name ) , *args , **kwargs )
        with patch('''datasets.load_metric''' ) as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher( cls , metric_name ):
        def wrapper(patcher ):
            patcher = contextmanager(patcher )
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def patch_bleurt(module_name ):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string('''sv''' , '''''' , '''''' )  # handle pytest cli flags
    class MockedPredictor(Predictor ):
        def predict( self , input_dict ):
            assert len(input_dict['''input_ids'''] ) == 2
            return np.array([1.03, 1.04] )
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def patch_bertscore(module_name ):
    import torch
    def bert_cos_score_idf(model , refs , *args , **kwargs ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('''bert_score.scorer.get_model''' ), patch(
        '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def patch_comet(module_name ):
    def load_from_checkpoint(model_path ):
        class Model:
            def predict( self , data , *args , **kwargs ):
                assert len(data ) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores ) / len(scores )
        return Model()
    # mock download_model and load_from_checkpoint, which are supposed to download a comet model
    with patch('''comet.download_model''' ) as mock_download_model:
        mock_download_model.return_value = None
        with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join('''metrics''' , '''seqeval''' ) )
    wrong_scheme = '''ERROR'''
    error_message = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
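# `parameterized.named_parameters` expands one test case per metric returned by
# get_local_metric_names(), e.g. LocalMetricTest.test_load_metric_bleu; a single metric
# can be selected with a keyword filter (hypothetical invocation):
#   python -m pytest tests/test_metric_common.py -k "bleu"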
| 198 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowerCamelCase =2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase =50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase =0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate( item, main_target ):
    """simple docstring"""
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover( parent_a, parent_b ):
    """simple docstring"""
    random_slice = random.randint(0, len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
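# Worked example of crossover (illustrative; assume random_slice == 2):
#   crossover("abcd", "wxyz") -> ("abyz", "wxcd")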
def mutate( child, genes ):
    """simple docstring"""
    child_list = list(child )
    if random.uniform(0, 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select( parent_a, population_score, genes, ):
    """simple docstring"""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0, N_SELECTED )][0]
        child_a, child_b = crossover(parent_a[0], parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a, genes ) )
        pop.append(mutate(child_b, genes ) )
    return pop
def basic( target, genes, debug = True ):
    """simple docstring"""
    if N_POPULATION < N_SELECTED:
        msg = F'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''.join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #    max_workers=NUM_WORKERS) as executor:
        #    futures = {executor.submit(evaluate, item) for item in population}
        #    concurrent.futures.wait(futures)
        #    population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x : x[1], reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                F'\nGeneration: {generation}'
                F'\nTotal Population:{total_population}'
                F'\nBest score: {population_score[0][1]}'
                F'\nBest string: {population_score[0][0]}' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )], population_score, genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
_lowerCamelCase =(
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
_lowerCamelCase =list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase =basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 334 | 0 |
import math
import flax.linen as nn
import jax.numpy as jnp
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Dict = 1 , __snake_case : List[Any] = 1 , __snake_case : Optional[int] = 1.0E4 , __snake_case : Union[str, Any] = False , __snake_case : Tuple = 1.0 , ):
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"Embedding dimension {embedding_dim} should be even"
UpperCAmelCase_ : int = float(embedding_dim // 2 )
UpperCAmelCase_ : str = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
UpperCAmelCase_ : List[Any] = min_timescale * jnp.exp(jnp.arange(lowerCAmelCase_ , dtype=jnp.floataa ) * -log_timescale_increment )
UpperCAmelCase_ : Optional[Any] = jnp.expand_dims(lowerCAmelCase_ , 1 ) * jnp.expand_dims(lowerCAmelCase_ , 0 )
# scale embeddings
UpperCAmelCase_ : Tuple = scale * emb
if flip_sin_to_cos:
UpperCAmelCase_ : str = jnp.concatenate([jnp.cos(lowerCAmelCase_ ), jnp.sin(lowerCAmelCase_ )] , axis=1 )
else:
UpperCAmelCase_ : Tuple = jnp.concatenate([jnp.sin(lowerCAmelCase_ ), jnp.cos(lowerCAmelCase_ )] , axis=1 )
UpperCAmelCase_ : Tuple = jnp.reshape(lowerCAmelCase_ , [jnp.shape(lowerCAmelCase_ )[0], embedding_dim] )
return signal
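# Editor's worked example (values are illustrative, not from the file): with
# embedding_dim=4, min_timescale=1, max_timescale=1e4 and freq_shift=1,
# num_timescales = 2 and log_timescale_increment = ln(1e4) / (2 - 1) ≈ 9.21,
# so inv_timescales ≈ [1.0, 1e-4] and a scalar timestep t is embedded as
# [sin(t), sin(1e-4 * t), cos(t), cos(1e-4 * t)] when flip_sin_to_cos is False.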
class lowerCamelCase (nn.Module ):
'''simple docstring'''
_snake_case : List[str] = 3_2
_snake_case : Optional[Any] = jnp.floataa
@nn.compact
def __call__( self , _UpperCamelCase ) -> str:
UpperCAmelCase_ : Optional[int] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(_UpperCamelCase )
UpperCAmelCase_ : List[str] = nn.silu(_UpperCamelCase )
UpperCAmelCase_ : List[str] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(_UpperCamelCase )
return temb
class lowerCamelCase (nn.Module ):
'''simple docstring'''
_snake_case : int = 3_2
_snake_case : Tuple = False
_snake_case : str = 1
@nn.compact
def __call__( self , _UpperCamelCase ) -> List[str]:
return get_sinusoidal_embeddings(
_UpperCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
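# Editor's usage sketch (hypothetical: the two modules above appear to mirror
# diffusers' FlaxTimesteps and FlaxTimestepEmbedding, and the names below are
# assumptions rather than part of the original file):
#
#   import jax
#   timesteps = jnp.array([1.0, 2.0, 3.0])
#   proj = get_sinusoidal_embeddings(timesteps, embedding_dim=32)  # (3, 32)
#   mlp = FlaxTimestepEmbedding(time_embed_dim=128)
#   params = mlp.init(jax.random.PRNGKey(0), proj)
#   temb = mlp.apply(params, proj)  # (3, 128)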
| 29 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE =tempfile.mktemp()
with open(snake_case ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,snake_case )
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained(snake_case )
finally:
os.remove(snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,snake_case )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024; tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _lowerCAmelCase ( self : int ):
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE =AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _lowerCAmelCase ( cls : List[Any] ):
SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(snake_case )
@classmethod
def _lowerCAmelCase ( cls : Tuple ):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _lowerCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case ,repo_id='test-tokenizer' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def _lowerCAmelCase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizer(snake_case )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=snake_case ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def _lowerCAmelCase ( self : str ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =CustomTokenizer(snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
        # Can't use an isinstance check here because the tokenizer class comes from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE =os.path.join(snake_case ,'vocab.txt' )
with open(snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained(snake_case )
bert_tokenizer.save_pretrained(snake_case )
SCREAMING_SNAKE_CASE =CustomTokenizerFast.from_pretrained(snake_case )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=snake_case )
        # Can't use an isinstance check here because the tokenizer class comes from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=snake_case ,trust_remote_code=snake_case )
        # Can't use an isinstance check here because the tokenizer class comes from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
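        # Editor's note: in this trie encoding an empty-string key mapped to 1
        # marks the end of a stored token, which is why adding "Hello" inserts
        # {"": 1} next to the existing branch for "Hello 友達".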
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] )
def _lowerCAmelCase ( self : Optional[Any] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
SCREAMING_SNAKE_CASE =Trie()
SCREAMING_SNAKE_CASE =trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(snake_case ,['AB', 'C'] )
| 334 | 0 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCAmelCase : int = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class __snake_case :
'''simple docstring'''
def __init__( self : Optional[Any] , A : int=None , **A : Dict ):
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
__snake_case: Dict = model
__snake_case: Union[str, Any] = kwargs.get("""model_save_dir""" , A )
__snake_case: str = kwargs.get("""latest_model_name""" , A )
def __call__( self : Tuple , **A : str ):
__snake_case: Union[str, Any] = {k: np.array(A ) for k, v in kwargs.items()}
return self.model.run(A , A )
@staticmethod
def UpperCAmelCase__ ( A : Union[str, Path] , A : Any=None , A : str=None ):
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
__snake_case: Tuple = """CPUExecutionProvider"""
return ort.InferenceSession(A , providers=[provider] , sess_options=A )
def UpperCAmelCase__ ( self : str , A : Union[str, Path] , A : Optional[str] = None , **A : Optional[Any] ):
__snake_case: int = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__snake_case: Any = self.model_save_dir.joinpath(self.latest_model_name )
__snake_case: Optional[Any] = Path(A ).joinpath(A )
try:
shutil.copyfile(A , A )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__snake_case: Optional[int] = self.model_save_dir.joinpath(A )
if src_path.exists():
__snake_case: Optional[int] = Path(A ).joinpath(A )
try:
shutil.copyfile(A , A )
except shutil.SameFileError:
pass
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[str, os.PathLike] , **A : Union[str, Any] , ):
if os.path.isfile(A ):
logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(A , exist_ok=A )
# saving model weights/files
self._save_pretrained(A , **A )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , A : Union[str, Path] , A : Optional[Union[bool, str, None]] = None , A : Optional[Union[str, None]] = None , A : bool = False , A : Optional[str] = None , A : Optional[str] = None , A : Optional[str] = None , A : Optional["ort.SessionOptions"] = None , **A : Optional[Any] , ):
__snake_case: int = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(A ):
__snake_case: str = OnnxRuntimeModel.load_model(
os.path.join(A , A ) , provider=A , sess_options=A )
__snake_case: str = Path(A )
# load model from hub
else:
# download model
__snake_case: List[str] = hf_hub_download(
repo_id=A , filename=A , use_auth_token=A , revision=A , cache_dir=A , force_download=A , )
__snake_case: str = Path(A ).parent
__snake_case: Union[str, Any] = Path(A ).name
__snake_case: Optional[int] = OnnxRuntimeModel.load_model(A , provider=A , sess_options=A )
return cls(model=A , **A )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , A : Union[str, Path] , A : bool = True , A : Optional[str] = None , A : Optional[str] = None , **A : Optional[Any] , ):
__snake_case: Union[str, Any] = None
if len(str(A ).split("""@""" ) ) == 2:
__snake_case , __snake_case: int = model_id.split("""@""" )
return cls._from_pretrained(
model_id=A , revision=A , cache_dir=A , force_download=A , use_auth_token=A , **A , )
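# Editor's usage sketch (hypothetical repo id and inputs; the wrapper above is
# diffusers' OnnxRuntimeModel with its method names obfuscated in this dump):
#
#   model = OnnxRuntimeModel.from_pretrained("some-org/unet-onnx", provider="CPUExecutionProvider")
#   out = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))
#   model.save_pretrained("./unet-onnx-local")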
| 111 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase =[
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
_lowerCamelCase =[
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =torch.load(lowerCAmelCase_, map_location='cpu' )
return sd
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_=rename_keys_prefix ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =OrderedDict()
SCREAMING_SNAKE_CASE =torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE =key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE =new_key.replace(name_pair[0], name_pair[1] )
SCREAMING_SNAKE_CASE =d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, so it is added separately here
SCREAMING_SNAKE_CASE =new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE ='pretraining'
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 512}
SCREAMING_SNAKE_CASE ='multichoice'
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048}
SCREAMING_SNAKE_CASE ='vqa_advanced'
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE ={'visual_embedding_dim': 2048, 'num_labels': 3129}
SCREAMING_SNAKE_CASE ='vqa'
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE ={
'visual_embedding_dim': 1024,
'num_labels': 2,
}
SCREAMING_SNAKE_CASE ='nlvr'
SCREAMING_SNAKE_CASE =VisualBertConfig(**lowerCAmelCase_ )
# Load State Dict
SCREAMING_SNAKE_CASE =load_state_dict(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE =get_new_dict(lowerCAmelCase_, lowerCAmelCase_ )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE =VisualBertForPreTraining(lowerCAmelCase_ )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE =VisualBertForQuestionAnswering(lowerCAmelCase_ )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE =VisualBertForVisualReasoning(lowerCAmelCase_ )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE =VisualBertForMultipleChoice(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
# Save Checkpoints
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
_lowerCamelCase =parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
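# Editor's note: a hypothetical invocation (script name and paths are placeholders):
#
#   python convert_visual_bert.py nlvr2_fine_tuned.th ./visualbert-nlvr2
#
# The checkpoint filename must appear in ACCEPTABLE_CHECKPOINTS above, and the
# substrings "pre", "vcr", "vqa_advanced", "vqa" and "nlvr" decide which config
# and model head are instantiated.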
| 334 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=7 ,__UpperCAmelCase=3 ,__UpperCAmelCase=30 ,__UpperCAmelCase=400 ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,__UpperCAmelCase=True ,__UpperCAmelCase=[0.5, 0.5, 0.5] ,__UpperCAmelCase=[0.5, 0.5, 0.5] ,__UpperCAmelCase=True ,__UpperCAmelCase=1 / 255 ,__UpperCAmelCase=True ,) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase__ : Any = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase__ : Dict = parent
lowerCAmelCase__ : Dict = batch_size
lowerCAmelCase__ : List[Any] = num_channels
lowerCAmelCase__ : Dict = min_resolution
lowerCAmelCase__ : Optional[int] = max_resolution
lowerCAmelCase__ : List[str] = do_resize
lowerCAmelCase__ : Union[str, Any] = size
lowerCAmelCase__ : List[str] = do_normalize
lowerCAmelCase__ : Union[str, Any] = image_mean
lowerCAmelCase__ : Optional[int] = image_std
lowerCAmelCase__ : List[str] = do_rescale
lowerCAmelCase__ : Dict = rescale_factor
lowerCAmelCase__ : Any = do_pad
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> List[str]:
if not batched:
lowerCAmelCase__ : int = image_inputs[0]
if isinstance(__UpperCAmelCase ,Image.Image ):
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = image.size
else:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase__ : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase__ : Union[str, Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase__ : List[str] = self.size["""shortest_edge"""]
lowerCAmelCase__ : Dict = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase__ : Dict = self.size["""shortest_edge"""]
lowerCAmelCase__ : Tuple = self.size["""shortest_edge"""]
else:
lowerCAmelCase__ : Dict = []
for image in image_inputs:
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase__ : Optional[int] = max(__UpperCAmelCase ,key=lambda __UpperCAmelCase : item[0] )[0]
lowerCAmelCase__ : Tuple = max(__UpperCAmelCase ,key=lambda __UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
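        # Editor's worked example (values assumed): with shortest_edge=18, an
        # input of height 30 and width 20 has w < h, so the width is resized
        # to 18 and the height to int(18 * 30 / 20) = 27.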
@require_torch
@require_vision
class lowerCAmelCase_( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : List[Any] = YolosImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Union[str, Any] = YolosImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""image_std""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""size""" ) )
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad ,__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,max_size=84 ,pad_and_return_pixel_mask=__UpperCAmelCase )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Dict:
pass
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,Image.Image )
# Test not batched input
lowerCAmelCase__ : str = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase ,batched=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = image_processing(__UpperCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase ,numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,np.ndarray )
# Test not batched input
lowerCAmelCase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowerCAmelCase__ : Union[str, Any] = image_processing(__UpperCAmelCase ,return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(__UpperCAmelCase ,batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def UpperCAmelCase_ ( self ) -> int:
# Initialize image_processing
lowerCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase ,torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,torch.Tensor )
# Test not batched input
lowerCAmelCase__ : Dict = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowerCAmelCase__ : Union[str, Any] = image_processing(__UpperCAmelCase ,return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase ,batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image_processings
lowerCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
lowerCAmelCase__ : Dict = self.image_processing_class(do_resize=__UpperCAmelCase ,do_normalize=__UpperCAmelCase ,do_rescale=__UpperCAmelCase )
# create random PyTorch tensors
lowerCAmelCase__ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase ,torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowerCAmelCase__ : List[Any] = image_processing_a.pad(__UpperCAmelCase ,return_tensors="""pt""" )
lowerCAmelCase__ : List[Any] = image_processing_a(__UpperCAmelCase ,return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] ,encoded_images["""pixel_values"""] ,atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self ) -> Any:
# prepare image and target
lowerCAmelCase__ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" ,"""r""" ) as f:
lowerCAmelCase__ : Optional[Any] = json.loads(f.read() )
lowerCAmelCase__ : Union[str, Any] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
lowerCAmelCase__ : Dict = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
lowerCAmelCase__ : Dict = image_processing(images=__UpperCAmelCase ,annotations=__UpperCAmelCase ,return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase__ : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape ,__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,__UpperCAmelCase ,atol=1E-4 ) )
# verify area
lowerCAmelCase__ : Union[str, Any] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,__UpperCAmelCase ) )
# verify boxes
lowerCAmelCase__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,__UpperCAmelCase )
lowerCAmelCase__ : Dict = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,__UpperCAmelCase ,atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ : int = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,__UpperCAmelCase ) )
# verify is_crowd
lowerCAmelCase__ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,__UpperCAmelCase ) )
# verify class_labels
lowerCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,__UpperCAmelCase ) )
# verify orig_size
lowerCAmelCase__ : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,__UpperCAmelCase ) )
# verify size
lowerCAmelCase__ : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,__UpperCAmelCase ) )
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
# prepare image, target and masks_path
lowerCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" ,"""r""" ) as f:
lowerCAmelCase__ : Dict = json.loads(f.read() )
lowerCAmelCase__ : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
lowerCAmelCase__ : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase__ : Optional[Any] = YolosImageProcessor(format="""coco_panoptic""" )
lowerCAmelCase__ : Optional[Any] = image_processing(images=__UpperCAmelCase ,annotations=__UpperCAmelCase ,masks_path=__UpperCAmelCase ,return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase__ : Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape ,__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,__UpperCAmelCase ,atol=1E-4 ) )
# verify area
lowerCAmelCase__ : str = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,__UpperCAmelCase ) )
# verify boxes
lowerCAmelCase__ : int = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,__UpperCAmelCase ,atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ : Optional[Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,__UpperCAmelCase ) )
# verify is_crowd
lowerCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,__UpperCAmelCase ) )
# verify class_labels
lowerCAmelCase__ : Union[str, Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,__UpperCAmelCase ) )
# verify masks
lowerCAmelCase__ : int = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() ,__UpperCAmelCase )
# verify orig_size
lowerCAmelCase__ : Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,__UpperCAmelCase ) )
# verify size
lowerCAmelCase__ : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,__UpperCAmelCase ) )
| 37 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'nllb-moe'
__UpperCAmelCase = ['past_key_values']
__UpperCAmelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : str ,snake_case : Optional[int]=128112 ,snake_case : Any=1024 ,snake_case : List[str]=12 ,snake_case : Optional[int]=4096 ,snake_case : List[str]=16 ,snake_case : Optional[Any]=12 ,snake_case : Optional[Any]=4096 ,snake_case : List[Any]=16 ,snake_case : Optional[Any]=0.05 ,snake_case : str=0.05 ,snake_case : Optional[int]=True ,snake_case : Tuple=True ,snake_case : Optional[Any]="relu" ,snake_case : Any=1024 ,snake_case : List[Any]=0.1 ,snake_case : List[Any]=0.1 ,snake_case : Optional[Any]=0.0 ,snake_case : List[Any]=0.02 ,snake_case : Any=2 ,snake_case : Dict=True ,snake_case : Tuple=False ,snake_case : Any="float32" ,snake_case : Tuple=False ,snake_case : List[Any]=128 ,snake_case : Tuple=64 ,snake_case : List[Any]=4 ,snake_case : List[Any]=4 ,snake_case : List[Any]=0.001 ,snake_case : int=0.001 ,snake_case : Tuple="all" ,snake_case : Union[str, Any]=False ,snake_case : Union[str, Any]=False ,snake_case : Optional[int]=1.0 ,snake_case : Optional[Any]=0.2 ,snake_case : Optional[int]=1 ,snake_case : Union[str, Any]=0 ,snake_case : Tuple=2 ,snake_case : List[Any]=False ,**snake_case : List[Any] ,):
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =d_model
SCREAMING_SNAKE_CASE =encoder_ffn_dim
SCREAMING_SNAKE_CASE =encoder_layers
SCREAMING_SNAKE_CASE =encoder_attention_heads
SCREAMING_SNAKE_CASE =decoder_ffn_dim
SCREAMING_SNAKE_CASE =decoder_layers
SCREAMING_SNAKE_CASE =decoder_attention_heads
SCREAMING_SNAKE_CASE =dropout
SCREAMING_SNAKE_CASE =attention_dropout
SCREAMING_SNAKE_CASE =activation_dropout
SCREAMING_SNAKE_CASE =activation_function
SCREAMING_SNAKE_CASE =init_std
SCREAMING_SNAKE_CASE =encoder_layerdrop
SCREAMING_SNAKE_CASE =decoder_layerdrop
SCREAMING_SNAKE_CASE =use_cache
SCREAMING_SNAKE_CASE =encoder_layers
SCREAMING_SNAKE_CASE =scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE =router_z_loss_coef
SCREAMING_SNAKE_CASE =router_aux_loss_coef
SCREAMING_SNAKE_CASE =decoder_sparse_step
SCREAMING_SNAKE_CASE =encoder_sparse_step
SCREAMING_SNAKE_CASE =num_experts
SCREAMING_SNAKE_CASE =expert_capacity
SCREAMING_SNAKE_CASE =router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
SCREAMING_SNAKE_CASE =router_dtype
SCREAMING_SNAKE_CASE =router_ignore_padding_tokens
SCREAMING_SNAKE_CASE =batch_prioritized_routing
SCREAMING_SNAKE_CASE =second_expert_policy
SCREAMING_SNAKE_CASE =normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE =moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE =moe_token_dropout
SCREAMING_SNAKE_CASE =output_router_logits
super().__init__(
pad_token_id=snake_case ,bos_token_id=snake_case ,eos_token_id=snake_case ,is_encoder_decoder=snake_case ,decoder_start_token_id=snake_case ,**snake_case ,)
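# Editor's sketch (assumed usage; the class above is transformers' NllbMoeConfig
# under an obfuscated name):
#
#   config = NllbMoeConfig(num_experts=8, decoder_sparse_step=2, router_dtype="float32")
#
# `router_dtype` is validated against {"float32", "float16", "bfloat16"} in
# __init__, so any other value raises a ValueError.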
| 334 | 0 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_snake_case : List[str] = logging.getLogger(__name__)
_snake_case : List[Any] = tf.data.AUTOTUNE
def a_ ( ):
__lowerCAmelCase = argparse.ArgumentParser(description='Train a masked language model on TPU.' )
parser.add_argument(
'--pretrained_model_config', type=lowerCAmelCase_, default='roberta-base', help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!', )
parser.add_argument(
'--tokenizer', type=lowerCAmelCase_, default='unigram-tokenizer-wikitext', help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.', )
parser.add_argument(
'--per_replica_batch_size', type=lowerCAmelCase_, default=8, help='Batch size per TPU core.', )
parser.add_argument(
'--no_tpu', action='store_true', help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.', )
parser.add_argument(
'--tpu_name', type=lowerCAmelCase_, help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.', default='local', )
parser.add_argument(
'--tpu_zone', type=lowerCAmelCase_, help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.', )
parser.add_argument(
'--gcp_project', type=lowerCAmelCase_, help='Google cloud project name. Only used for non-Colab TPU nodes.' )
parser.add_argument(
'--bfloat16', action='store_true', help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.', )
parser.add_argument(
'--train_dataset', type=lowerCAmelCase_, help='Path to training dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.', )
parser.add_argument(
'--shuffle_buffer_size', type=lowerCAmelCase_, default=2**18, help='Size of the shuffle buffer (in samples)', )
parser.add_argument(
'--eval_dataset', type=lowerCAmelCase_, help='Path to evaluation dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.', )
parser.add_argument(
'--num_epochs', type=lowerCAmelCase_, default=1, help='Number of epochs to train for.', )
parser.add_argument(
'--learning_rate', type=lowerCAmelCase_, default=1E-4, help='Learning rate to use for training.', )
parser.add_argument(
'--weight_decay_rate', type=lowerCAmelCase_, default=1E-3, help='Weight decay rate to use for training.', )
parser.add_argument(
'--max_length', type=lowerCAmelCase_, default=512, help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py', )
parser.add_argument(
'--mlm_probability', type=lowerCAmelCase_, default=0.15, help='Fraction of tokens to mask during training.', )
parser.add_argument('--output_dir', type=lowerCAmelCase_, required=lowerCAmelCase_, help='Path to save model checkpoints to.' )
parser.add_argument('--hub_model_id', type=lowerCAmelCase_, help='Model ID to upload to on the Hugging Face Hub.' )
__lowerCAmelCase = parser.parse_args()
return args
def a_ ( lowerCAmelCase_ : Any ):
try:
if args.tpu_name:
__lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
else:
__lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
'--gcp_project. When running on a TPU VM, use --tpu_name local.' )
tf.config.experimental_connect_to_cluster(lowerCAmelCase_ )
tf.tpu.experimental.initialize_tpu_system(lowerCAmelCase_ )
return tpu
def a_ ( lowerCAmelCase_ : List[Any] ):
__lowerCAmelCase = 0
for file in file_list:
__lowerCAmelCase = file.split('/' )[-1]
__lowerCAmelCase = re.search(R'-\d+-(\d+)\.tfrecord', lowerCAmelCase_ ).group(1 )
__lowerCAmelCase = int(lowerCAmelCase_ )
num_samples += sample_count
return num_samples
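# Editor's example (shard naming assumed from the regex above): a file named
# "wikitext-00042-2048.tfrecord" matches r"-\d+-(\d+)\.tfrecord" and
# contributes int("2048") samples to the running total.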
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : int, lowerCAmelCase_ : str, lowerCAmelCase_ : List[Any], lowerCAmelCase_ : List[Any], lowerCAmelCase_ : int=None ):
__lowerCAmelCase = count_samples(lowerCAmelCase_ )
__lowerCAmelCase = tf.data.Dataset.from_tensor_slices(lowerCAmelCase_ )
if shuffle:
__lowerCAmelCase = dataset.shuffle(len(lowerCAmelCase_ ) )
__lowerCAmelCase = tf.data.TFRecordDataset(lowerCAmelCase_, num_parallel_reads=lowerCAmelCase_ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
__lowerCAmelCase = dataset.apply(tf.data.experimental.assert_cardinality(lowerCAmelCase_ ) )
__lowerCAmelCase = dataset.map(lowerCAmelCase_, num_parallel_calls=lowerCAmelCase_ )
if shuffle:
assert shuffle_buffer_size is not None
__lowerCAmelCase = dataset.shuffle(args.shuffle_buffer_size )
__lowerCAmelCase = dataset.batch(lowerCAmelCase_, drop_remainder=lowerCAmelCase_ )
__lowerCAmelCase = dataset.map(lowerCAmelCase_, num_parallel_calls=lowerCAmelCase_ )
__lowerCAmelCase = dataset.prefetch(lowerCAmelCase_ )
return dataset
def a_ ( lowerCAmelCase_ : str ):
if not args.no_tpu:
__lowerCAmelCase = initialize_tpu(lowerCAmelCase_ )
__lowerCAmelCase = tf.distribute.TPUStrategy(lowerCAmelCase_ )
else:
__lowerCAmelCase = tf.distribute.OneDeviceStrategy(device='/gpu:0' )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' )
__lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer )
__lowerCAmelCase = AutoConfig.from_pretrained(args.pretrained_model_config )
__lowerCAmelCase = tokenizer.vocab_size
__lowerCAmelCase = tf.io.gfile.glob(os.path.join(args.train_dataset, '*.tfrecord' ) )
if not training_records:
raise ValueError(F"""No .tfrecord files found in {args.train_dataset}.""" )
__lowerCAmelCase = tf.io.gfile.glob(os.path.join(args.eval_dataset, '*.tfrecord' ) )
if not eval_records:
raise ValueError(F"""No .tfrecord files found in {args.eval_dataset}.""" )
__lowerCAmelCase = count_samples(lowerCAmelCase_ )
__lowerCAmelCase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
__lowerCAmelCase = steps_per_epoch * args.num_epochs
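    # Editor's worked example (assumed numbers): 1_000_000 training samples on
    # 8 replicas with per_replica_batch_size=8 give a global batch of 64, so
    # steps_per_epoch = 1_000_000 // 64 = 15_625 and total_train_steps is that
    # times args.num_epochs.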
with strategy.scope():
__lowerCAmelCase = TFAutoModelForMaskedLM.from_config(lowerCAmelCase_ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
__lowerCAmelCase , __lowerCAmelCase = create_optimizer(
num_train_steps=lowerCAmelCase_, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowerCAmelCase_, metrics=['accuracy'] )
def decode_fn(lowerCAmelCase_ : List[str] ):
__lowerCAmelCase = {
'input_ids': tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
'attention_mask': tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowerCAmelCase_, lowerCAmelCase_ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
__lowerCAmelCase = DataCollatorForLanguageModeling(
tokenizer=lowerCAmelCase_, mlm_probability=args.mlm_probability, mlm=lowerCAmelCase_, return_tensors='tf' )
def mask_with_collator(lowerCAmelCase_ : Union[str, Any] ):
# TF really needs an isin() function
__lowerCAmelCase = (
~tf.cast(batch['attention_mask'], tf.bool )
| (batch['input_ids'] == tokenizer.cls_token_id)
| (batch['input_ids'] == tokenizer.sep_token_id)
)
__lowerCAmelCase , __lowerCAmelCase = data_collator.tf_mask_tokens(
batch['input_ids'], vocab_size=len(lowerCAmelCase_ ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=lowerCAmelCase_, )
return batch
__lowerCAmelCase = args.per_replica_batch_size * strategy.num_replicas_in_sync
__lowerCAmelCase = prepare_dataset(
lowerCAmelCase_, decode_fn=lowerCAmelCase_, mask_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_, shuffle=lowerCAmelCase_, shuffle_buffer_size=args.shuffle_buffer_size, )
__lowerCAmelCase = prepare_dataset(
lowerCAmelCase_, decode_fn=lowerCAmelCase_, mask_fn=lowerCAmelCase_, batch_size=lowerCAmelCase_, shuffle=lowerCAmelCase_, )
__lowerCAmelCase = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=lowerCAmelCase_ ) )
model.fit(
lowerCAmelCase_, validation_data=lowerCAmelCase_, epochs=args.num_epochs, callbacks=lowerCAmelCase_, )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
_snake_case : Any = parse_args()
main(args)
| 284 |
from __future__ import annotations
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =sorted(numsa + numsa )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =divmod(len(lowerCAmelCase_ ), 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
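# Editor's worked example: [1, 3] and [2] merge to [1, 2, 3]; divmod(3, 2) is
# (1, 1), so the median is all_numbers[1] = 2. For [1, 2] and [3, 4],
# divmod(4, 2) is (2, 0) and the median is (all_numbers[2] + all_numbers[1]) / 2 = 2.5.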
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase =[float(x) for x in input("Enter the elements of first array: ").split()]
_lowerCamelCase =[float(x) for x in input("Enter the elements of second array: ").split()]
print(f'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
| 334 | 0 |
def _lowerCAmelCase ( __lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
if p < 2:
raise ValueError('''p should not be less than 2!''' )
elif p == 2:
return True
snake_case__ : List[Any] = 4
snake_case__ : Optional[int] = (1 << p) - 1
for _ in range(p - 2 ):
snake_case__ : List[str] = ((s * s) - 2) % m
return s == 0
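# Editor's note: this implements the Lucas-Lehmer test: M_p = 2**p - 1 is prime
# iff the sequence s_0 = 4, s_k = (s_{k-1}**2 - 2) % M_p reaches 0 after p - 2
# steps. For p = 5 (M_p = 31) the sequence runs 4 -> 14 -> 8 -> 0, so 31 is prime.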
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 230 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'transfo-xl'
__UpperCAmelCase = ['mems']
__UpperCAmelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] ,snake_case : List[Any]=267735 ,snake_case : Optional[int]=[20000, 40000, 200000] ,snake_case : int=1024 ,snake_case : Optional[Any]=1024 ,snake_case : Tuple=16 ,snake_case : int=64 ,snake_case : Union[str, Any]=4096 ,snake_case : List[str]=4 ,snake_case : int=False ,snake_case : int=18 ,snake_case : Tuple=1600 ,snake_case : List[str]=1000 ,snake_case : Optional[Any]=True ,snake_case : List[str]=True ,snake_case : Optional[Any]=0 ,snake_case : Optional[Any]=-1 ,snake_case : List[Any]=True ,snake_case : Optional[Any]=0.1 ,snake_case : Union[str, Any]=0.0 ,snake_case : int=True ,snake_case : Any="normal" ,snake_case : int=0.01 ,snake_case : int=0.01 ,snake_case : str=0.02 ,snake_case : Any=1e-5 ,snake_case : Optional[int]=0 ,**snake_case : List[Any] ,):
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =[]
self.cutoffs.extend(snake_case )
if proj_share_all_but_first:
SCREAMING_SNAKE_CASE =[False] + [True] * len(self.cutoffs )
else:
SCREAMING_SNAKE_CASE =[False] + [False] * len(self.cutoffs )
SCREAMING_SNAKE_CASE =d_model
SCREAMING_SNAKE_CASE =d_embed
SCREAMING_SNAKE_CASE =d_head
SCREAMING_SNAKE_CASE =d_inner
SCREAMING_SNAKE_CASE =div_val
SCREAMING_SNAKE_CASE =pre_lnorm
SCREAMING_SNAKE_CASE =n_layer
SCREAMING_SNAKE_CASE =n_head
SCREAMING_SNAKE_CASE =mem_len
SCREAMING_SNAKE_CASE =same_length
SCREAMING_SNAKE_CASE =attn_type
SCREAMING_SNAKE_CASE =clamp_len
SCREAMING_SNAKE_CASE =sample_softmax
SCREAMING_SNAKE_CASE =adaptive
SCREAMING_SNAKE_CASE =dropout
SCREAMING_SNAKE_CASE =dropatt
SCREAMING_SNAKE_CASE =untie_r
SCREAMING_SNAKE_CASE =init
SCREAMING_SNAKE_CASE =init_range
SCREAMING_SNAKE_CASE =proj_init_std
SCREAMING_SNAKE_CASE =init_std
SCREAMING_SNAKE_CASE =layer_norm_epsilon
super().__init__(eos_token_id=snake_case ,**snake_case )
@property
def _lowerCAmelCase ( self : str ):
# Message copied from Transformer-XL documentation
logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Dict ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
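# Editor's note (derived from __init__ above): with the default cutoffs
# [20000, 40000, 200000] the adaptive softmax splits the 267735-token vocab into
# a head cluster plus 3 tail clusters, and proj_share_all_but_first=True makes
# the shared-projection flags (tie_projs upstream) [False, True, True, True].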
| 334 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class a :
"""simple docstring"""
def __init__( self: Optional[int] , UpperCamelCase: Optional[Any] , ):
"""simple docstring"""
A__ = parent
A__ = 13
A__ = 7
A__ = True
A__ = True
A__ = False
A__ = True
A__ = 99
A__ = 32
A__ = 2
A__ = 4
A__ = 37
A__ = """gelu"""
A__ = 0.1
A__ = 0.1
A__ = 5_12
A__ = 16
A__ = 2
A__ = 0.02
A__ = 3
A__ = 4
A__ = None
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: List[str] ):
"""simple docstring"""
A__ = TFDistilBertModel(config=UpperCamelCase )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(UpperCamelCase )
A__ = [input_ids, input_mask]
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: Any ):
"""simple docstring"""
A__ = TFDistilBertForMaskedLM(config=UpperCamelCase )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: int , UpperCamelCase: str , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: str , UpperCamelCase: int , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
A__ = TFDistilBertForQuestionAnswering(config=UpperCamelCase )
A__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: Dict , UpperCamelCase: int , UpperCamelCase: Optional[int] , UpperCamelCase: Any , UpperCamelCase: Dict ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFDistilBertForSequenceClassification(UpperCamelCase )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: Tuple , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] ):
"""simple docstring"""
A__ = self.num_choices
A__ = TFDistilBertForMultipleChoice(UpperCamelCase )
A__ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A__ = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self: Tuple , UpperCamelCase: str , UpperCamelCase: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: str , UpperCamelCase: str ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFDistilBertForTokenClassification(UpperCamelCase )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
((A__) , (A__) , (A__) , (A__) , (A__) , (A__)) = config_and_inputs
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict

@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
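

# A minimal standalone sketch (assumes TensorFlow, transformers, and access to the
# public "distilbert-base-uncased" checkpoint): the same forward pass the slow
# integration test above pins down, runnable outside the unittest harness.
if __name__ == "__main__":
    model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
    hidden_states = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    print(hidden_states.shape)  # expected: (1, 6, 768)
    print(hidden_states[:, :3, :3])  # compare against expected_slice above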
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = get_activation("""swish""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""silu""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = get_activation("""mish""" )
self.assertIsInstance(UpperCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""gelu""" )
self.assertIsInstance(UpperCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
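

# A minimal usage sketch (illustrative only, not part of the test suite): the point
# of get_activation is to resolve a config string to an nn.Module instance, so model
# code can take its activation as a plain string. TinyMLP here is hypothetical.
class TinyMLP(nn.Module):
    def __init__(self, dim: int, act_fn: str = "silu"):
        super().__init__()
        self.fc = nn.Linear(dim, dim)
        self.act = get_activation(act_fn)  # e.g. nn.SiLU for "silu" or "swish"

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.act(self.fc(x))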