| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81 – 54k | int64 0 – 721 | stringlengths 91 – 41.9k | int64 0 – 699 | int64 0 – 1 |
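Each row pairs a Python `code` sample with a `style_context` sample, plus their integer style ids and a binary `label`; the raw rows follow below in flattened form, with the remaining integer cells appearing as `| … |` lines between the code blocks. The snippet below is a minimal sketch of how the schema above could be inspected locally. It assumes the split has been exported to a Parquet file named `train.parquet` (the file name is a placeholder, not part of the dataset), and the interpretation of `label` as "the two samples share a style" is an assumption, since the preview does not state its meaning.

```python
# Minimal sketch: inspect the columns described in the schema table above
# from a local Parquet export. "train.parquet" is a placeholder path.
import pandas as pd

df = pd.read_parquet("train.parquet")

# Expected columns: code, code_codestyle, style_context, style_context_codestyle, label
print(df.columns.tolist())
print(df[["code_codestyle", "style_context_codestyle", "label"]].describe())

# Peek at one row: the two code strings and the binary label
# (treating label == 1 as "same style" is an assumption).
row = df.iloc[0]
print(row["code"][:200])
print(row["style_context"][:200])
print("label:", int(row["label"]))
```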
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
a = {}
a = job["started_at"]
a = job["completed_at"]
a = date_parser.parse(A )
a = date_parser.parse(A )
a = round((end_datetime - start_datetime).total_seconds() / 60.0 )
a = start
a = end
a = duration_in_min
return job_info
def __magic_name__ ( A : Optional[int], A : int=None ):
'''simple docstring'''
a = None
if token is not None:
a = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
a = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
a = requests.get(A, headers=A ).json()
a = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(A ) for job in result["jobs"]} )
a = math.ceil((result["total_count"] - 100) / 100 )
for i in range(A ):
a = requests.get(url + F"""&page={i + 2}""", headers=A ).json()
job_time.update({job["name"]: extract_time_from_single_job(A ) for job in result["jobs"]} )
return job_time
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
__lowerCAmelCase : Any = get_job_time(args.workflow_run_id)
__lowerCAmelCase : str = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v['duration']}''')
| 662 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset)
def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ):
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A, info=A, split=A, axis=A )
else:
return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : List[Any] = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
| 662 | 1 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : torch.FloatTensor
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : str=("DownEncoderBlock2D",) , __lowerCamelCase : int=(64,) , __lowerCamelCase : str=2 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : Optional[int]="silu" , __lowerCamelCase : int=True , ) -> Any:
super().__init__()
a = layers_per_block
a = torch.nn.Convad(
__lowerCamelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
a = None
a = nn.ModuleList([] )
# down
a = block_out_channels[0]
for i, down_block_type in enumerate(__lowerCamelCase ):
a = output_channel
a = block_out_channels[i]
a = i == len(__lowerCamelCase ) - 1
a = get_down_block(
__lowerCamelCase , num_layers=self.layers_per_block , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__lowerCamelCase , resnet_groups=__lowerCamelCase , attention_head_dim=__lowerCamelCase , temb_channels=__lowerCamelCase , )
self.down_blocks.append(__lowerCamelCase )
# mid
a = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__lowerCamelCase , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__lowerCamelCase , temb_channels=__lowerCamelCase , )
# out
a = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__lowerCamelCase , eps=1e-6 )
a = nn.SiLU()
a = 2 * out_channels if double_z else out_channels
a = nn.Convad(block_out_channels[-1] , __lowerCamelCase , 3 , padding=1 )
a = False
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Tuple ) -> List[str]:
a = x
a = self.conv_in(__lowerCamelCase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__lowerCamelCase : str ):
def custom_forward(*__lowerCamelCase : List[Any] ):
return module(*__lowerCamelCase )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(__lowerCamelCase ) , __lowerCamelCase , use_reentrant=__lowerCamelCase )
# middle
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __lowerCamelCase , use_reentrant=__lowerCamelCase )
else:
for down_block in self.down_blocks:
a = torch.utils.checkpoint.checkpoint(create_custom_forward(__lowerCamelCase ) , __lowerCamelCase )
# middle
a = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __lowerCamelCase )
else:
# down
for down_block in self.down_blocks:
a = down_block(__lowerCamelCase )
# middle
a = self.mid_block(__lowerCamelCase )
# post-process
a = self.conv_norm_out(__lowerCamelCase )
a = self.conv_act(__lowerCamelCase )
a = self.conv_out(__lowerCamelCase )
return sample
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : int=3 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : str=("UpDecoderBlock2D",) , __lowerCamelCase : Optional[Any]=(64,) , __lowerCamelCase : Any=2 , __lowerCamelCase : str=32 , __lowerCamelCase : str="silu" , __lowerCamelCase : str="group" , ) -> Tuple:
super().__init__()
a = layers_per_block
a = nn.Convad(
__lowerCamelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
a = None
a = nn.ModuleList([] )
a = in_channels if norm_type == "spatial" else None
# mid
a = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__lowerCamelCase , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__lowerCamelCase , temb_channels=__lowerCamelCase , )
# up
a = list(reversed(__lowerCamelCase ) )
a = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__lowerCamelCase ):
a = output_channel
a = reversed_block_out_channels[i]
a = i == len(__lowerCamelCase ) - 1
a = get_up_block(
__lowerCamelCase , num_layers=self.layers_per_block + 1 , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , prev_output_channel=__lowerCamelCase , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__lowerCamelCase , resnet_groups=__lowerCamelCase , attention_head_dim=__lowerCamelCase , temb_channels=__lowerCamelCase , resnet_time_scale_shift=__lowerCamelCase , )
self.up_blocks.append(__lowerCamelCase )
a = output_channel
# out
if norm_type == "spatial":
a = SpatialNorm(block_out_channels[0] , __lowerCamelCase )
else:
a = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__lowerCamelCase , eps=1e-6 )
a = nn.SiLU()
a = nn.Convad(block_out_channels[0] , __lowerCamelCase , 3 , padding=1 )
a = False
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : str=None ) -> Union[str, Any]:
a = z
a = self.conv_in(__lowerCamelCase )
a = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__lowerCamelCase : Tuple ):
def custom_forward(*__lowerCamelCase : List[str] ):
return module(*__lowerCamelCase )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __lowerCamelCase , __lowerCamelCase , use_reentrant=__lowerCamelCase )
a = sample.to(__lowerCamelCase )
# up
for up_block in self.up_blocks:
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(__lowerCamelCase ) , __lowerCamelCase , __lowerCamelCase , use_reentrant=__lowerCamelCase )
else:
# middle
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __lowerCamelCase , __lowerCamelCase )
a = sample.to(__lowerCamelCase )
# up
for up_block in self.up_blocks:
a = torch.utils.checkpoint.checkpoint(create_custom_forward(__lowerCamelCase ) , __lowerCamelCase , __lowerCamelCase )
else:
# middle
a = self.mid_block(__lowerCamelCase , __lowerCamelCase )
a = sample.to(__lowerCamelCase )
# up
for up_block in self.up_blocks:
a = up_block(__lowerCamelCase , __lowerCamelCase )
# post-process
if latent_embeds is None:
a = self.conv_norm_out(__lowerCamelCase )
else:
a = self.conv_norm_out(__lowerCamelCase , __lowerCamelCase )
a = self.conv_act(__lowerCamelCase )
a = self.conv_out(__lowerCamelCase )
return sample
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int="random" , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[int]=True ) -> Tuple:
super().__init__()
a = n_e
a = vq_embed_dim
a = beta
a = legacy
a = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
a = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
a = self.used.shape[0]
a = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
a = self.re_embed
a = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
a = n_e
a = sane_index_shape
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : int ) -> List[str]:
a = inds.shape
assert len(__lowerCamelCase ) > 1
a = inds.reshape(ishape[0] , -1 )
a = self.used.to(__lowerCamelCase )
a = (inds[:, :, None] == used[None, None, ...]).long()
a = match.argmax(-1 )
a = match.sum(2 ) < 1
if self.unknown_index == "random":
a = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
a = self.unknown_index
return new.reshape(__lowerCamelCase )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[str] ) -> Any:
a = inds.shape
assert len(__lowerCamelCase ) > 1
a = inds.reshape(ishape[0] , -1 )
a = self.used.to(__lowerCamelCase )
if self.re_embed > self.used.shape[0]: # extra token
a = 0 # simply set to zero
a = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __lowerCamelCase )
return back.reshape(__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Dict ) -> int:
# reshape z -> (batch, height, width, channel) and flatten
a = z.permute(0 , 2 , 3 , 1 ).contiguous()
a = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
a = torch.argmin(torch.cdist(__lowerCamelCase , self.embedding.weight ) , dim=1 )
a = self.embedding(__lowerCamelCase ).view(z.shape )
a = None
a = None
# compute loss for embedding
if not self.legacy:
a = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
a = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
a = z + (z_q - z).detach()
# reshape back to match original input shape
a = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
a = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
a = self.remap_to_used(__lowerCamelCase )
a = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
a = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple ) -> int:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
a = indices.reshape(shape[0] , -1 ) # add batch axis
a = self.unmap_to_all(__lowerCamelCase )
a = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
a = self.embedding(__lowerCamelCase )
if shape is not None:
a = z_q.view(__lowerCamelCase )
# reshape back to match original input shape
a = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=False ) -> Union[str, Any]:
a = parameters
a , a = torch.chunk(__lowerCamelCase , 2 , dim=1 )
a = torch.clamp(self.logvar , -30.0 , 20.0 )
a = deterministic
a = torch.exp(0.5 * self.logvar )
a = torch.exp(self.logvar )
if self.deterministic:
a = a = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
a = randn_tensor(
self.mean.shape , generator=__lowerCamelCase , device=self.parameters.device , dtype=self.parameters.dtype )
a = self.mean + self.std * sample
return x
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any]=None ) -> str:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
a = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
return self.mean
| 662 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
a = parser.parse_args()
return args
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
def fn(A : Tuple ):
return tokenizer(examples["text"] )
return fn
def __magic_name__ ( A : Any ):
'''simple docstring'''
a = []
for i in range(len(tokenized_data["input_ids"] ) ):
a = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
a = tf.train.Features(feature=A )
a = tf.train.Example(features=A )
a = example.SerializeToString()
records.append(A )
return records
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
a = min(len(A ), args.limit )
a = dataset.select(range(A ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a = os.path.join(args.output_dir, args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
a = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
a = tokenize_function(A )
a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A : List[Any] ):
# Concatenate all texts.
a = {k: sum(examples[k], [] ) for k in examples.keys()}
a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a = {
k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 )
a = 0
a = 0
for shard in range(0, len(A ), args.shard_size ):
a = grouped_dataset[shard : shard + args.shard_size]
a = len(dataset_snapshot["input_ids"] )
a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
a = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
a = serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A, A ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
print(F"""Total {args.split} records: {total_records}""", file=A )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = parse_args()
main(args)
| 662 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : str = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = """ctrl"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE_ : Tuple = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int , __lowerCamelCase : int=24_65_34 , __lowerCamelCase : Optional[Any]=2_56 , __lowerCamelCase : List[str]=12_80 , __lowerCamelCase : List[str]=81_92 , __lowerCamelCase : Tuple=48 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : str=1e-6 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Union[str, Any]=True , **__lowerCamelCase : Optional[int] , ) -> List[Any]:
a = vocab_size
a = n_positions
a = n_embd
a = n_layer
a = n_head
a = dff
a = resid_pdrop
a = embd_pdrop
a = layer_norm_epsilon
a = initializer_range
a = use_cache
super().__init__(**__lowerCamelCase )
| 662 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
a = {}
a = tokenizer(example["content"], truncation=A )["input_ids"]
a = len(example["content"] ) / len(output["input_ids"] )
return output
__lowerCAmelCase : Dict = HfArgumentParser(PretokenizationArguments)
__lowerCAmelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase : List[Any] = multiprocessing.cpu_count()
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__lowerCAmelCase : List[Any] = time.time()
__lowerCAmelCase : str = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : int = time.time()
__lowerCAmelCase : Optional[int] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 662 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = 'Hello, World!'
__lowerCAmelCase : Any = 'en_XX'
def __magic_name__ ( A : str, A : str, A : bool ):
'''simple docstring'''
a = Path("data_bin" )
a = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(A ).parent ), checkpoint_file=Path(A ).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(A ), bpe="sentencepiece", sentencepiece_model=str(Path(A ).parent / "sentencepiece.bpe.model" ), src_dict=str(data_dir / "dict.txt" ), )
xmod.eval() # disable dropout
print(A )
a = xmod.model.encoder.sentence_encoder
a = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2 ), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
if classification_head:
a = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:", A )
a = XmodForSequenceClassification(A ) if classification_head else XmodForMaskedLM(A )
model.eval()
# Now let's copy all the weights.
# Embeddings
a = xmod_sent_encoder.embed_tokens.weight
a = xmod_sent_encoder.embed_positions.weight
a = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
a = xmod_sent_encoder.layernorm_embedding.weight
a = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
a = model.roberta.encoder.layer[i]
a = xmod_sent_encoder.layers[i]
# self attention
a = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
a = xmod_layer.self_attn.q_proj.weight
a = xmod_layer.self_attn.q_proj.bias
a = xmod_layer.self_attn.k_proj.weight
a = xmod_layer.self_attn.k_proj.bias
a = xmod_layer.self_attn.v_proj.weight
a = xmod_layer.self_attn.v_proj.bias
# self-attention output
a = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
a = xmod_layer.self_attn.out_proj.weight
a = xmod_layer.self_attn.out_proj.bias
a = xmod_layer.self_attn_layer_norm.weight
a = xmod_layer.self_attn_layer_norm.bias
# intermediate
a = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
a = xmod_layer.fca.weight
a = xmod_layer.fca.bias
# output
a = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
a = xmod_layer.fca.weight
a = xmod_layer.fca.bias
a = xmod_layer.final_layer_norm.weight
a = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
a = xmod_layer.adapter_layer_norm.weight
a = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
a = bert_output.adapter_modules[lang_code]
a = xmod_layer.adapter_modules[lang_code]
a = from_adapter.fca.weight
a = from_adapter.fca.bias
a = from_adapter.fca.weight
a = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
a = xmod_sent_encoder.layer_norm.weight
a = xmod_sent_encoder.layer_norm.bias
if classification_head:
a = xmod.model.classification_heads["mnli"].dense.weight
a = xmod.model.classification_heads["mnli"].dense.bias
a = xmod.model.classification_heads["mnli"].out_proj.weight
a = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
a = xmod.model.encoder.lm_head.dense.weight
a = xmod.model.encoder.lm_head.dense.bias
a = xmod.model.encoder.lm_head.layer_norm.weight
a = xmod.model.encoder.lm_head.layer_norm.bias
a = xmod.model.encoder.lm_head.weight
a = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
a = xmod.encode(A ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(A )
a = model(A )[0]
if classification_head:
a = xmod.model.classification_heads["mnli"](xmod.extract_features(A ) )
else:
a = xmod.model(A, lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape, their_output.shape )
a = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
a = torch.allclose(A, A, atol=1E-3 )
print("Do both models output the same tensors?", "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(A ).mkdir(parents=A, exist_ok=A )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
__lowerCAmelCase : int = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 662 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]:
a = tokenizer
a = dataset
a = len(__lowerCamelCase ) if n_tasks is None else n_tasks
a = n_copies
def __iter__( self : Tuple ) -> str:
a = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]:
a = start_length
a = eof_strings
a = tokenizer
def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__lowerCamelCase )
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
a = re.split("(%s)" % "|".join(A ), A )
# last string should be ""
return "".join(string_list[:-2] )
def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ):
'''simple docstring'''
a = defaultdict(A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(A ) ):
with torch.no_grad():
a = batch["ids"].shape[-1]
a = accelerator.unwrap_model(A ).generate(
input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A )
# each task is generated batch_size times
a = batch["task_id"].repeat(A )
a = accelerator.pad_across_processes(
A, dim=1, pad_index=tokenizer.pad_token_id )
a , a = accelerator.gather((generated_tokens, generated_tasks) )
a = generated_tokens.cpu().numpy()
a = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(A, A ):
gen_token_dict[task].append(A )
a = [[] for _ in range(A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A )
code_gens[task].append(remove_last_block(A ) )
return code_gens
def __magic_name__ ( ):
'''simple docstring'''
a = HfArgumentParser(A )
a = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a = "false"
if args.num_workers is None:
a = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a = Accelerator()
set_seed(args.seed, device_specific=A )
# Load model and tokenizer
a = AutoTokenizer.from_pretrained(args.model_ckpt )
a = tokenizer.eos_token
a = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ),
}
# Load evaluation dataset and metric
a = load_dataset("openai_humaneval" )
a = load_metric("code_eval" )
a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
a = args.n_samples // args.batch_size
a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a = DataLoader(A, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a = code_eval_metric.compute(references=[""], predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
a , a = accelerator.prepare(A, A )
a = complete_code(
A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, )
if accelerator.is_main_process:
a = []
for task in tqdm(range(A ) ):
a = human_eval["test"][task]["test"]
a = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
a , a = code_eval_metric.compute(
references=A, predictions=A, num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file, "w" ) as fp:
json.dump(A, A )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 662 | 1 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def __magic_name__ ( A : List[str], A : Tuple ):
'''simple docstring'''
a = Mock()
a = conn, Mock()
a = iter([1, None] )
a = lambda A : next(A )
# ===== invoke =====
send_file(filename="mytext.txt", testing=A )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 662 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 1 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Dict , *__lowerCamelCase : Tuple , **__lowerCamelCase : int ) -> None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 662 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = """perceiver"""
def __init__( self : Optional[Any] , __lowerCamelCase : Tuple=2_56 , __lowerCamelCase : int=12_80 , __lowerCamelCase : Union[str, Any]=7_68 , __lowerCamelCase : Any=1 , __lowerCamelCase : Optional[Any]=26 , __lowerCamelCase : Optional[int]=8 , __lowerCamelCase : Union[str, Any]=8 , __lowerCamelCase : int=None , __lowerCamelCase : int=None , __lowerCamelCase : List[Any]="kv" , __lowerCamelCase : Any=1 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : Any="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : Any=1e-12 , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=2_62 , __lowerCamelCase : Union[str, Any]=20_48 , __lowerCamelCase : Tuple=56 , __lowerCamelCase : Optional[Any]=[3_68, 4_96] , __lowerCamelCase : str=16 , __lowerCamelCase : int=19_20 , __lowerCamelCase : int=16 , __lowerCamelCase : Optional[Any]=[1, 16, 2_24, 2_24] , **__lowerCamelCase : Union[str, Any] , ) -> Optional[Any]:
super().__init__(**__lowerCamelCase )
a = num_latents
a = d_latents
a = d_model
a = num_blocks
a = num_self_attends_per_block
a = num_self_attention_heads
a = num_cross_attention_heads
a = qk_channels
a = v_channels
a = cross_attention_shape_for_attention
a = self_attention_widening_factor
a = cross_attention_widening_factor
a = hidden_act
a = attention_probs_dropout_prob
a = initializer_range
a = layer_norm_eps
a = use_query_residual
# masked language modeling attributes
a = vocab_size
a = max_position_embeddings
# image classification attributes
a = image_size
# flow attributes
a = train_size
# multimodal autoencoding attributes
a = num_frames
a = audio_samples_per_frame
a = samples_per_patch
a = output_shape
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a = {0: "batch", 1: "choice", 2: "sequence"}
else:
a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCAmelCase ( self : Tuple ) -> float:
return 1e-4
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 40 , __lowerCamelCase : int = 40 , ) -> Mapping[str, Any]:
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__lowerCamelCase , __lowerCamelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a = preprocessor.num_special_tokens_to_add(__lowerCamelCase )
a = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
a = [" ".join(["a"] ) * seq_length] * batch_size
a = dict(preprocessor(__lowerCamelCase , return_tensors=__lowerCamelCase ) )
a = inputs.pop("input_ids" )
return inputs
elif isinstance(__lowerCamelCase , __lowerCamelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a = compute_effective_axis_dimension(__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
a = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a = dict(preprocessor(images=__lowerCamelCase , return_tensors=__lowerCamelCase ) )
a = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__lowerCAmelCase : int = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 662 | 1 |
from heapq import heappop, heappush
import numpy as np
def __magic_name__ ( A : np.ndarray, A : tuple[int, int], A : tuple[int, int], A : bool, ):
'''simple docstring'''
a , a = grid.shape
a = [-1, 1, 0, 0]
a = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a , a = [(0, source)], set()
a = np.full((rows, cols), np.inf )
a = 0
a = np.empty((rows, cols), dtype=A )
a = None
while queue:
((a) , (a)) = heappop(A )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a = []
while (x, y) != source:
path.append((x, y) )
a , a = predecessors[x, y]
path.append(A ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(A ) ):
a , a = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(A, (dist + 1, (nx, ny)) )
a = dist + 1
a = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
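    # Added illustration (self-contained, made-up coordinates): the priority queue
    # always pops the pair with the smallest distance first, which is what makes the
    # grid search above behave like Dijkstra's algorithm.
    example_queue: list = []
    heappush(example_queue, (2.0, (1, 1)))
    heappush(example_queue, (1.0, (0, 1)))
    assert heappop(example_queue) == (1.0, (0, 1))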
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : Dict = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 1 |
from __future__ import annotations
def __magic_name__ ( A : list, A : int, A : int, A : int ):
'''simple docstring'''
a = []
a , a = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
a = result + left + right
return input_list
def __magic_name__ ( A : list ):
'''simple docstring'''
if len(A ) <= 1:
return input_list
a = list(A )
# iteration for two-way merging
a = 2
while p <= len(A ):
# getting low, high and middle value for merge-sort of single list
for i in range(0, len(A ), A ):
a = i
a = i + p - 1
a = (low + high + 1) // 2
a = merge(A, A, A, A )
# final merge of last two parts
if p * 2 >= len(A ):
a = i
a = merge(A, 0, A, len(A ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
__lowerCAmelCase : Optional[int] = []
else:
__lowerCAmelCase : Tuple = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
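    # Added illustration (self-contained, assumed input): one two-way merge step of
    # the kind the merge helper above performs on two already-sorted halves.
    left_half, right_half = [1, 4], [2, 3]
    merged = []
    while left_half and right_half:
        merged.append((left_half if left_half[0] <= right_half[0] else right_half).pop(0))
    assert merged + left_half + right_half == [1, 2, 3, 4]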
| 662 |
import math
import flax.linen as nn
import jax.numpy as jnp
def __magic_name__ ( A : jnp.ndarray, A : int, A : float = 1, A : float = 1, A : float = 1.0E4, A : bool = False, A : float = 1.0, ):
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
a = float(embedding_dim // 2 )
a = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
a = min_timescale * jnp.exp(jnp.arange(A, dtype=jnp.floataa ) * -log_timescale_increment )
a = jnp.expand_dims(A, 1 ) * jnp.expand_dims(A, 0 )
# scale embeddings
a = scale * emb
if flip_sin_to_cos:
a = jnp.concatenate([jnp.cos(A ), jnp.sin(A )], axis=1 )
else:
a = jnp.concatenate([jnp.sin(A ), jnp.cos(A )], axis=1 )
a = jnp.reshape(A, [jnp.shape(A )[0], embedding_dim] )
return signal
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCamelCase )
a = nn.silu(__lowerCamelCase )
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCamelCase )
return temb
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : float = 1
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]:
return get_sinusoidal_embeddings(
__lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
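# Added, self-contained sketch (not from the original module): the same half-sine /
# half-cosine timestep-embedding idea in plain NumPy with made-up sizes, to show the
# kind of table get_sinusoidal_embeddings builds.
if __name__ == "__main__":
    import numpy as np
    example_timesteps = np.arange(4, dtype=np.float32)  # assumed example timesteps
    half_dim = 4  # i.e. embedding_dim == 8 in this sketch
    freqs = np.exp(np.arange(half_dim) * -(np.log(1e4) / half_dim))
    angles = example_timesteps[:, None] * freqs[None, :]
    emb = np.concatenate([np.sin(angles), np.cos(angles)], axis=1)
    print(emb.shape)  # (4, 8)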
| 662 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Optional[int]=18 , __lowerCamelCase : Dict=30 , __lowerCamelCase : Dict=4_00 , __lowerCamelCase : List[str]=True , __lowerCamelCase : int=None , __lowerCamelCase : Tuple=True , __lowerCamelCase : int=False , __lowerCamelCase : str=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=[0.5, 0.5, 0.5] , __lowerCamelCase : str=[0.5, 0.5, 0.5] , ) -> Dict:
a = parent
a = batch_size
a = num_channels
a = image_size
a = min_resolution
a = max_resolution
a = do_resize
a = size if size is not None else {"height": 18, "width": 20}
a = do_thumbnail
a = do_align_axis
a = do_pad
a = do_normalize
a = image_mean
a = image_std
def __UpperCAmelCase ( self : Any ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = DonutImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
a = DonutImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_thumbnail" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_align_long_axis" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
def __UpperCAmelCase ( self : str ) -> List[str]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 20} )
a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
# Previous config had dimensions in (width, height) order
a = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"height": 84, "width": 42} )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@is_flaky()
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 662 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 662 | 1 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__lowerCAmelCase : Optional[Any] = '\\n\n'
__lowerCAmelCase : Optional[Any] = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
__lowerCAmelCase : Any = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : int = 16 , __lowerCamelCase : bool = True , __lowerCamelCase : Union[str, Any]=None ) -> Dict:
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
a = "cuda"
else:
a = "cuda" if torch.cuda.is_available() else "cpu"
a = AutoModelForCausalLM.from_pretrained(__lowerCamelCase )
a = model.to(__lowerCamelCase )
a = AutoTokenizer.from_pretrained(__lowerCamelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
a = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__lowerCamelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
a = model.config.max_length - 1
else:
a = model.config.max_length
a = tokenizer(
__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors="pt" , return_attention_mask=__lowerCamelCase , ).to(__lowerCamelCase )
a = encodings["input_ids"]
a = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
a = []
a = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(__lowerCamelCase ) , __lowerCamelCase ) ):
a = min(start_index + batch_size , len(__lowerCamelCase ) )
a = encoded_texts[start_index:end_index]
a = attn_masks[start_index:end_index]
if add_start_token:
a = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__lowerCamelCase )
a = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
a = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__lowerCamelCase ), attn_mask] , dim=1 )
a = encoded_batch
with torch.no_grad():
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase ).logits
a = out_logits[..., :-1, :].contiguous()
a = labels[..., 1:].contiguous()
a = attn_mask[..., 1:].contiguous()
a = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , __lowerCamelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__lowerCamelCase )}
| 662 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0]
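# Added note (the filename is an assumed example): for an input such as "beagle_32.jpg"
# the regex above strips the trailing index and extension, leaving the label "beagle".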
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
    parser.add_argument(
        "--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=A, default="logs", help="Location where to store experiment tracking logs and relevant project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
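# Added usage note: a script with this argument parser is typically launched through the
# Accelerate CLI, e.g. `accelerate launch <this_script>.py --data_dir <folder_of_jpg_images>`;
# the placeholder names are illustrative and not taken from the original repository.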
| 662 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__lowerCAmelCase : Optional[int] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__lowerCAmelCase : Optional[int] = typing.Union[np.floataa, int, float] # noqa: UP007
def __magic_name__ ( A : Vector, A : Vector ):
'''simple docstring'''
return np.sqrt(np.sum((np.asarray(A ) - np.asarray(A )) ** 2 ) )
def __magic_name__ ( A : Vector, A : Vector ):
'''simple docstring'''
return sum((va - va) ** 2 for va, va in zip(A, A ) ) ** (1 / 2)
if __name__ == "__main__":
def __magic_name__ ( ):
'''simple docstring'''
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10000, globals=globals(), ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])", number=10000, globals=globals(), ) )
benchmark()
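    # Added illustrative check (self-contained): the classic 3-4-5 right triangle,
    # computed directly with NumPy in the same way as the first helper above.
    assert abs(float(np.sqrt(np.sum((np.asarray([0, 0]) - np.asarray([3, 4])) ** 2))) - 5.0) < 1e-12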
| 662 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
__lowerCAmelCase : Dict = '|'.join(sys.argv[1:])
__lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 662 | 1 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowerCAmelCase : Tuple = logging.getLogger(__name__)
def __magic_name__ ( A : torch.nn.Module, A : BnbQuantizationConfig, A : Union[str, os.PathLike] = None, A : Optional[Dict[str, Union[int, str, torch.device]]] = None, A : Optional[List[str]] = None, A : Optional[Dict[Union[int, str], Union[int, str]]] = None, A : Optional[Union[str, os.PathLike]] = None, A : bool = False, ):
'''simple docstring'''
a = bnb_quantization_config.load_in_abit
a = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed." )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed." )
a = []
# custom device map
if isinstance(A, A ) and len(device_map.keys() ) > 1:
a = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
a = get_keys_to_not_convert(A )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(A )
a = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
a = []
a = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(A )
# compatibility with peft
a = load_in_abit
a = load_in_abit
a = get_parameter_device(A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
a = replace_with_bnb_layers(A, A, modules_to_not_convert=A )
# convert param to the right dtype
a = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
a = name.replace(".weight", "" ).replace(".bias", "" )
a = getattr(A, A, A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(A ):
param.to(A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info(
            F"""The model device type is {model_device.type}. However, cuda is needed for quantization. """
            "We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
a = replace_with_bnb_layers(
A, A, modules_to_not_convert=A )
a = get_quantized_model_device_map(
A, A, A, max_memory=A, no_split_module_classes=A, )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
a = True
a = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
load_checkpoint_in_model(
A, A, A, dtype=bnb_quantization_config.torch_dtype, offload_folder=A, offload_state_dict=A, keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules, offload_abit_bnb=load_in_abit and offload, )
return dispatch_model(A, device_map=A, offload_dir=A )
def __magic_name__ ( A : str, A : Union[str, Any], A : Union[str, Any]=None, A : Optional[Any]=None, A : Dict=None ):
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
a = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info("The device_map was not initialized. " "Setting device_map to `{'':torch.cuda.current_device()}`." )
if isinstance(A, A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'." )
a = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
a = {}
a = special_dtypes
a = no_split_module_classes
a = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
a = get_balanced_memory(
A, low_zero=(device_map == "balanced_low_0"), max_memory=A, **A, )
a = max_memory
a = infer_auto_device_map(A, **A )
if isinstance(A, A ):
        # check that we don't have any quantized module on the cpu
a = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
a = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
del device_map_without_some_modules
return device_map
def __magic_name__ ( A : Union[str, Any], A : Optional[int], A : int=None, A : List[Any]=None ):
'''simple docstring'''
if modules_to_not_convert is None:
a = []
a , a = _replace_with_bnb_layers(
A, A, A, A )
if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on GitHub if you think this is"
" a bug." )
return model
def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : str=None, A : Optional[Any]=None, ):
'''simple docstring'''
a = False
for name, module in model.named_children():
if current_key_name is None:
a = []
current_key_name.append(A )
if isinstance(A, nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
a = ".".join(A )
a = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
a = False
break
if proceed:
                # Load the bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
a = bnb.nn.LinearabitLt(
module.in_features, module.out_features, module.bias is not None, has_fpaa_weights=A, threshold=bnb_quantization_config.llm_inta_threshold, )
elif bnb_quantization_config.load_in_abit:
a = bnb.nn.Linearabit(
module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_abit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant, quant_type=bnb_quantization_config.bnb_abit_quant_type, )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
a = module.weight.data
if module.bias is not None:
a = module.bias.data
bnb_module.requires_grad_(A )
setattr(A, A, A )
a = True
if len(list(module.children() ) ) > 0:
a , a = _replace_with_bnb_layers(
A, A, A, A )
a = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
with init_empty_weights():
        a = deepcopy(A ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
a = find_tied_parameters(A )
# For compatibility with Accelerate < 0.18
if isinstance(A, A ):
a = sum(list(tied_params.values() ), [] ) + list(tied_params.keys() )
else:
a = sum(A, [] )
a = len(A ) > 0
# Check if it is a base model
a = False
if hasattr(A, "base_model_prefix" ):
a = not hasattr(A, model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
a = list(model.named_children() )
a = [list_modules[-1][0]]
# add last module together with tied weights
a = set(A ) - set(A )
a = list(set(A ) ) + list(A )
# remove ".weight" from the keys
a = [".weight", ".bias"]
a = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
a = name.replace(A, "" )
filtered_module_names.append(A )
return filtered_module_names
def __magic_name__ ( A : int ):
'''simple docstring'''
for m in model.modules():
if isinstance(A, bnb.nn.Linearabit ):
return True
return False
def __magic_name__ ( A : nn.Module ):
'''simple docstring'''
return next(parameter.parameters() ).device
def __magic_name__ ( A : Dict, A : List[str], A : int, A : Tuple, A : Optional[int], A : Tuple, A : List[Any] ):
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(A, A, 0, dtype=A, value=A )
a = param_name
a = model
if "." in tensor_name:
a = tensor_name.split("." )
for split in splits[:-1]:
a = getattr(A, A )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
a = new_module
a = splits[-1]
# offload weights
a = False
offload_weight(module._parameters[tensor_name], A, A, index=A )
if hasattr(module._parameters[tensor_name], "SCB" ):
offload_weight(
module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB" ), A, index=A, )
else:
offload_weight(A, A, A, index=A )
offload_weight(A, param_name.replace("weight", "SCB" ), A, index=A )
set_module_tensor_to_device(A, A, "meta", dtype=A, value=torch.empty(*param.size() ) )
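# Added, heavily hedged usage sketch of how the helpers above are typically combined
# (the user model class, the public entry-point name and the exact keyword names are
# assumptions for illustration, not taken from this file):
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#     with init_empty_weights():
#         empty_model = MyUserModel(model_config)  # assumed user-defined model
#     quantized_model = load_and_quantize_model(   # assumed public name of the first helper
#         empty_model, bnb_config, weights_location="path/to/checkpoint", device_map="auto"
#     )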
| 662 |
def __magic_name__ ( A : int, A : int, A : int ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
a = _modexpt(A, exponent // 2, A ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A, exponent - 1, A )) % modulo_value
def __magic_name__ ( A : int = 1777, A : int = 1855, A : int = 8 ):
'''simple docstring'''
a = base
for _ in range(1, A ):
a = _modexpt(A, A, 10**digits )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
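    # Added illustration: modular exponentiation by squaring agrees with Python's
    # built-in three-argument pow on a small assumed example, 3**4 mod 1000.
    assert pow(3, 4, 1000) == 81 == (3 ** 4) % 1000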
| 662 | 1 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__lowerCAmelCase : Optional[int] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@classmethod
def __UpperCAmelCase ( cls : List[Any] ) -> Optional[Any]:
a = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def __UpperCAmelCase ( cls : Any ) -> Any:
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
a = FlaxBertModel(__lowerCamelCase )
model.push_to_hub("test-model-flax" , use_auth_token=self._token )
a = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
a = flatten_dict(unfreeze(model.params ) )
a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__lowerCamelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id="test-model-flax" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__lowerCamelCase , repo_id="test-model-flax" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
a = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
a = flatten_dict(unfreeze(model.params ) )
a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__lowerCamelCase , 1e-3 , msg=f"""{key} not identical""" )
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
a = FlaxBertModel(__lowerCamelCase )
model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )
a = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
a = flatten_dict(unfreeze(model.params ) )
a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__lowerCamelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-model-flax-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
a = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
a = flatten_dict(unfreeze(model.params ) )
a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__lowerCamelCase , 1e-3 , msg=f"""{key} not identical""" )
def __magic_name__ ( A : str, A : List[str] ):
'''simple docstring'''
a = True
a = flatten_dict(modela.params )
a = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
a = False
return models_are_equal
@require_flax
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
a = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
a = FlaxBertModel(__lowerCamelCase )
a = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__lowerCamelCase , __lowerCamelCase ) )
with self.assertRaises(__lowerCamelCase ):
a = FlaxBertModel.from_pretrained(__lowerCamelCase )
a = FlaxBertModel.from_pretrained(__lowerCamelCase , subfolder=__lowerCamelCase )
self.assertTrue(check_models_equal(__lowerCamelCase , __lowerCamelCase ) )
def __UpperCAmelCase ( self : List[str] ) -> Any:
a = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
a = FlaxBertModel(__lowerCamelCase )
a = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__lowerCamelCase , __lowerCamelCase ) , max_shard_size="10KB" )
with self.assertRaises(__lowerCamelCase ):
a = FlaxBertModel.from_pretrained(__lowerCamelCase )
a = FlaxBertModel.from_pretrained(__lowerCamelCase , subfolder=__lowerCamelCase )
self.assertTrue(check_models_equal(__lowerCamelCase , __lowerCamelCase ) )
def __UpperCAmelCase ( self : Any ) -> List[str]:
a = "bert"
a = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(__lowerCamelCase ):
a = FlaxBertModel.from_pretrained(__lowerCamelCase )
a = FlaxBertModel.from_pretrained(__lowerCamelCase , subfolder=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
a = "bert"
a = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(__lowerCamelCase ):
a = FlaxBertModel.from_pretrained(__lowerCamelCase )
a = FlaxBertModel.from_pretrained(__lowerCamelCase , subfolder=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
| 662 |
def __magic_name__ ( A : str, A : str ):
'''simple docstring'''
def get_matched_characters(A : str, A : str ) -> str:
a = []
a = min(len(_stra ), len(_stra ) ) // 2
for i, l in enumerate(_stra ):
a = int(max(0, i - limit ) )
a = int(min(i + limit + 1, len(_stra ) ) )
if l in _stra[left:right]:
matched.append(A )
a = F"""{_stra[0:_stra.index(A )]} {_stra[_stra.index(A ) + 1:]}"""
return "".join(A )
# matching characters
a = get_matched_characters(A, A )
a = get_matched_characters(A, A )
a = len(A )
# transposition
a = (
len([(ca, ca) for ca, ca in zip(A, A ) if ca != ca] ) // 2
)
if not match_count:
a = 0.0
else:
a = (
1
/ 3
* (
match_count / len(A )
+ match_count / len(A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a = 0
for ca, ca in zip(stra[:4], stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 662 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowerCAmelCase : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class snake_case__ (datasets.BuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 1_00_00
SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None
SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None
class snake_case__ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ParquetConfig
def __UpperCAmelCase ( self : Dict ) -> Dict:
return datasets.DatasetInfo(features=self.config.features )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Tuple ) -> Optional[int]:
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
a = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCamelCase , (str, list, tuple) ):
a = data_files
if isinstance(__lowerCamelCase , __lowerCamelCase ):
a = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
a = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
a = []
for split_name, files in data_files.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
a = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
a = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
        # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__lowerCamelCase ):
with open(__lowerCamelCase , "rb" ) as f:
a = datasets.Features.from_arrow_schema(pq.read_schema(__lowerCamelCase ) )
break
splits.append(datasets.SplitGenerator(name=__lowerCamelCase , gen_kwargs={"files": files} ) )
return splits
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : pa.Table ) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
a = table_cast(__lowerCamelCase , self.info.features.arrow_schema )
return pa_table
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[Any] ) -> str:
a = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCamelCase ) ):
with open(__lowerCamelCase , "rb" ) as f:
a = pq.ParquetFile(__lowerCamelCase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
a = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(__lowerCamelCase )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise
| 662 |
__lowerCAmelCase : List[Any] = {str(digit): digit**5 for digit in range(10)}
def __magic_name__ ( A : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
def __magic_name__ ( ):
'''simple docstring'''
return sum(
number
for number in range(1000, 1000000 )
if number == digits_fifth_powers_sum(A ) )
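# Worked example (added): 4150 is one of the numbers this search finds, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150. This comment is illustrative only
# and does not change the computation.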
if __name__ == "__main__":
print(solution())
| 662 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Dict:
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
a = BlipProcessor(__lowerCamelCase , __lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer
def __UpperCAmelCase ( self : str , **__lowerCamelCase : List[Any] ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : Any ) -> int:
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : List[Any] ) -> int:
a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
a = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : Dict ) -> str:
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "lower newer"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : Any ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "lower newer"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "lower newer"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 662 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
a = do_rescale
a = rescale_factor
a = do_pad
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]:
if not batched:
a = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size["shortest_edge"] * h / w )
a = self.size["shortest_edge"]
elif w > h:
a = self.size["shortest_edge"]
a = int(self.size["shortest_edge"] * w / h )
else:
a = self.size["shortest_edge"]
a = self.size["shortest_edge"]
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 662 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
__lowerCAmelCase : Optional[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def __magic_name__ ( A : Dict ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
a = key.replace(".model.1.bias", ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
a = key.replace(".model.1.weight", ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
a = key.replace(".model.3.bias", ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
a = key.replace(".model.3.weight", ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
a = key.replace("conditioner_blocks.0", "conditioner_blocks" )
if "prime_prior" in key:
a = key.replace("prime_prior", "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
a = key.replace(".emb.", "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k", ".codebook" )
if "y_emb." in key:
return key.replace("y_emb.", "metadata_embedding." )
if "x_emb.emb." in key:
a = key.replace("0.x_emb.emb", "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln", "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln", ".layer_norm" )
if "_ln" in key:
return key.replace("_ln", "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj", "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out", "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out", "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb", "embed_tokens" )
return key
def __magic_name__ ( A : List[str], A : Any, A : List[str], A : List[Any] ):
'''simple docstring'''
a = {}
import re
a = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
a = re.compile(
R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
a = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
a = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
a = re.compile(
R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
a = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
a = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
a = re.compile(
R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
a = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(A ):
a = re_encoder_block_conv_in.match(A )
a = regex_match.groups()
a = int(groups[2] ) * 2 + int(groups[3] )
a = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
a = re_encoder_block_conv_in.sub(A, A )
elif re_encoder_block_resnet.fullmatch(A ):
a = re_encoder_block_resnet.match(A )
a = regex_match.groups()
a = int(groups[2] ) * 2 + int(groups[3] )
a = {"1": 1, "3": 2}[groups[-2]]
a = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
a = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
a = prefix + resnet_block
a = re_encoder_block_resnet.sub(A, A )
elif re_encoder_block_proj_out.fullmatch(A ):
a = re_encoder_block_proj_out.match(A )
a = regex_match.groups()
a = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
a = re_encoder_block_proj_out.sub(A, A )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(A ):
a = re_decoder_block_conv_out.match(A )
a = regex_match.groups()
a = int(groups[2] ) * 2 + int(groups[3] ) - 2
a = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
a = re_decoder_block_conv_out.sub(A, A )
elif re_decoder_block_resnet.fullmatch(A ):
a = re_decoder_block_resnet.match(A )
a = regex_match.groups()
a = int(groups[2] ) * 2 + int(groups[3] ) - 2
a = {"1": 1, "3": 2}[groups[-2]]
a = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
a = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
a = prefix + resnet_block
a = re_decoder_block_resnet.sub(A, A )
elif re_decoder_block_proj_in.fullmatch(A ):
a = re_decoder_block_proj_in.match(A )
a = regex_match.groups()
a = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
a = re_decoder_block_proj_in.sub(A, A )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(A ):
a = re_prior_cond_conv_out.match(A )
a = regex_match.groups()
a = int(groups[1] ) * 2 + int(groups[2] ) - 2
a = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
a = re_prior_cond_conv_out.sub(A, A )
elif re_prior_cond_resnet.fullmatch(A ):
a = re_prior_cond_resnet.match(A )
a = regex_match.groups()
a = int(groups[1] ) * 2 + int(groups[2] ) - 2
a = {"1": 1, "3": 2}[groups[-2]]
a = F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
a = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
a = prefix + resnet_block
a = re_prior_cond_resnet.sub(A, A )
elif re_prior_cond_proj_in.fullmatch(A ):
a = re_prior_cond_proj_in.match(A )
a = regex_match.groups()
a = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
a = re_prior_cond_proj_in.sub(A, A )
# keep original key
else:
a = original_key
a = replace_key(A )
if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(F"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
a = model_state_dict[F"""{key_prefix}.{key}"""]
print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
a = original_key
a = original_key
a = value
return new_dict
@torch.no_grad()
def __magic_name__ ( A : Optional[Any]=None, A : Tuple=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
a = requests.get(F"""{PREFIX}{file}""", allow_redirects=A )
os.makedirs(F"""{pytorch_dump_folder_path}/""", exist_ok=A )
open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""", "wb" ).write(r.content )
a = MODEL_MAPPING[model_name.split("/" )[-1]]
a = JukeboxConfig.from_pretrained(A )
a = JukeboxModel(A )
a = []
a = {}
for i, dict_name in enumerate(A ):
a = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["model"]
a = {}
for k in old_dic.keys():
if k.endswith(".b" ):
a = old_dic[k]
elif k.endswith(".w" ):
a = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
a = old_dic[k]
else:
a = old_dic[k]
a = "vqvae" if i == 0 else F"""priors.{3 - i}"""
a = fix_jukebox_keys(A, model.state_dict(), A, A )
weight_dict.append(A )
a = weight_dict.pop(0 )
model.vqvae.load_state_dict(A )
for i in range(len(A ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(A ).mkdir(exist_ok=A )
with open(F"""{pytorch_dump_folder_path}/mapping.json""", "w" ) as txtfile:
json.dump(A, A )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
return weight_dict
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 662 |
def __magic_name__ ( A : list ):
'''simple docstring'''
for i in range(len(A ) - 1, 0, -1 ):
a = False
for j in range(A, 0, -1 ):
if unsorted[j] < unsorted[j - 1]:
a , a = unsorted[j - 1], unsorted[j]
a = True
for j in range(A ):
if unsorted[j] > unsorted[j + 1]:
a , a = unsorted[j + 1], unsorted[j]
a = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 662 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : List[str] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class snake_case__ (_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = """bit"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""preactivation""", """bottleneck"""]
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""SAME""", """VALID"""]
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Any=64 , __lowerCamelCase : str=[2_56, 5_12, 10_24, 20_48] , __lowerCamelCase : Any=[3, 4, 6, 3] , __lowerCamelCase : List[str]="preactivation" , __lowerCamelCase : Union[str, Any]="relu" , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=32 , __lowerCamelCase : Any=0.0 , __lowerCamelCase : Any=False , __lowerCamelCase : List[str]=32 , __lowerCamelCase : Any=1 , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=None , **__lowerCamelCase : Optional[Any] , ) -> Dict:
super().__init__(**__lowerCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
a = global_padding.upper()
else:
raise ValueError(f"""Padding strategy {global_padding} not supported""" )
a = num_channels
a = embedding_size
a = hidden_sizes
a = depths
a = layer_type
a = hidden_act
a = global_padding
a = num_groups
a = drop_path_rate
a = embedding_dynamic_padding
a = output_stride
a = width_factor
a = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(__lowerCamelCase ) + 1 )]
a , a = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
| 662 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
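# Illustrative note (added, hedged): because GLEU is the minimum of n-gram precision and
# n-gram recall, a hypothesis identical to its single reference scores 1.0, while one sharing
# no n-grams with it scores 0.0. Assuming the standard nltk signature, a hand-checkable call
# with the helper imported above would be:
#     gleu_score.corpus_gleu([[["the", "cat", "sat"]]], [["the", "cat", "sat"]])  # -> 1.0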
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase )
}
| 662 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
__lowerCAmelCase : Optional[int] = '\nimport os\n'
__lowerCAmelCase : Any = '\ndef foo():\n import os\n return False\n'
__lowerCAmelCase : int = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
__lowerCAmelCase : Dict = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
__lowerCAmelCase : Optional[Any] = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
__lowerCAmelCase : Optional[int] = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
__lowerCAmelCase : List[Any] = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
__lowerCAmelCase : List[Any] = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
__lowerCAmelCase : str = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
__lowerCAmelCase : Dict = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
__lowerCAmelCase : str = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", A )
def __magic_name__ ( A : Union[str, Any], A : Tuple ):
'''simple docstring'''
a = os.path.join(A, "test_file.py" )
with open(A, "w" ) as _tmp_file:
_tmp_file.write(A )
a = get_imports(A )
assert parsed_imports == ["os"]
| 662 |
import argparse
import os
import re
__lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"')
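# Illustrative note (added): the blocks targeted by the two patterns above look roughly like
#     MODEL_MAPPING_NAMES = OrderedDict(
#         [
#             ("albert", "AlbertModel"),
#             ("bert", "BertModel"),
#         ]
#     )
# and the sorter below re-orders the entries alphabetically by the quoted identifier. The
# mapping name and entries shown here are only an illustrative sketch.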
def __magic_name__ ( A : int, A : bool = False ):
'''simple docstring'''
with open(A, "r", encoding="utf-8" ) as f:
a = f.read()
a = content.split("\n" )
a = []
a = 0
while line_idx < len(A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(A, "w", encoding="utf-8" ) as f:
f.write("\n".join(A ) )
elif "\n".join(A ) != content:
return True
def __magic_name__ ( A : bool = False ):
'''simple docstring'''
a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )]
a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames]
if not overwrite and any(A ):
a = [f for f, d in zip(A, A ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 662 | 1 |
import warnings
from functools import wraps
from typing import Callable
def __magic_name__ ( A : Callable ):
'''simple docstring'''
@wraps(A )
def _inner_fn(*A : Any, **A : Optional[Any] ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future."""), A, )
return fn(*A, **A )
return _inner_fn
| 662 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 662 | 1 |
from ...processing_utils import ProcessorMixin
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = """SpeechT5FeatureExtractor"""
SCREAMING_SNAKE_CASE_ : int = """SpeechT5Tokenizer"""
def __init__( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ) -> Optional[Any]:
super().__init__(__lowerCamelCase , __lowerCamelCase )
def __call__( self : Any , *__lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
a = kwargs.pop("audio" , __lowerCamelCase )
a = kwargs.pop("text" , __lowerCamelCase )
a = kwargs.pop("text_target" , __lowerCamelCase )
a = kwargs.pop("audio_target" , __lowerCamelCase )
a = kwargs.pop("sampling_rate" , __lowerCamelCase )
if audio is not None and text is not None:
raise ValueError(
"Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
if audio_target is not None and text_target is not None:
raise ValueError(
"Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
if audio is not None:
a = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase )
elif text is not None:
a = self.tokenizer(__lowerCamelCase , **__lowerCamelCase )
else:
a = None
if audio_target is not None:
a = self.feature_extractor(audio_target=__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase )
a = targets["input_values"]
elif text_target is not None:
a = self.tokenizer(__lowerCamelCase , **__lowerCamelCase )
a = targets["input_ids"]
else:
a = None
if inputs is None:
return targets
if targets is not None:
a = labels
a = targets.get("attention_mask" )
if decoder_attention_mask is not None:
a = decoder_attention_mask
return inputs
def __UpperCAmelCase ( self : Any , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[str] ) -> Optional[Any]:
a = kwargs.pop("input_values" , __lowerCamelCase )
a = kwargs.pop("input_ids" , __lowerCamelCase )
a = kwargs.pop("labels" , __lowerCamelCase )
if input_values is not None and input_ids is not None:
raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
if input_values is not None:
a = self.feature_extractor.pad(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
elif input_ids is not None:
a = self.tokenizer.pad(__lowerCamelCase , **__lowerCamelCase )
else:
a = None
if labels is not None:
if "input_ids" in labels or (isinstance(__lowerCamelCase , __lowerCamelCase ) and "input_ids" in labels[0]):
a = self.tokenizer.pad(__lowerCamelCase , **__lowerCamelCase )
a = targets["input_ids"]
else:
a = self.feature_extractor.feature_size
a = self.feature_extractor.num_mel_bins
a = self.feature_extractor.pad(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
a = feature_size_hack
a = targets["input_values"]
else:
a = None
if inputs is None:
return targets
if targets is not None:
a = labels
a = targets.get("attention_mask" )
if decoder_attention_mask is not None:
a = decoder_attention_mask
return inputs
def __UpperCAmelCase ( self : Optional[int] , *__lowerCamelCase : Any , **__lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : Any , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
| 662 |
from __future__ import annotations
import time
import numpy as np
__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
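        # Column-wise sum of the allocation table: total units of each resource type currently allocated across all processes.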
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
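        # Available resources = total claim vector minus everything currently allocated.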
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
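        # Remaining need of each process = its maximum claim minus its current allocation.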
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
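        # Remember the original index of every need vector so the process number can still be recovered after items are removed from the work list.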
        return {self.__need().index(i): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 1 |
import re
from filelock import FileLock
try:
import nltk
__lowerCAmelCase : List[str] = True
except (ImportError, ModuleNotFoundError):
__lowerCAmelCase : str = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def __magic_name__ ( A : str ):
'''simple docstring'''
re.sub("<n>", "", A ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(A ) )
| 662 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset)
def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ):
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A, info=A, split=A, axis=A )
else:
return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
| 662 | 1 |
import warnings
from .generation import TFGenerationMixin
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
warnings.warn(
"""Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
"""be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , _UpperCamelCase , )
| 662 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
| 662 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def __magic_name__ ( A : str ):
'''simple docstring'''
a = "huggingface/label-files"
a = "imagenet-1k-id2label.json"
a = json.load(open(hf_hub_download(A, A, repo_type="dataset" ), "r" ) )
a = {int(A ): v for k, v in idalabel.items()}
a = {v: k for k, v in idalabel.items()}
a = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
a = BitConfig(
conv_layer=A, num_labels=1000, idalabel=A, labelaid=A, )
return config
def __magic_name__ ( A : Tuple ):
'''simple docstring'''
if "stem.conv" in name:
a = name.replace("stem.conv", "bit.embedder.convolution" )
if "blocks" in name:
a = name.replace("blocks", "layers" )
if "head.fc" in name:
a = name.replace("head.fc", "classifier.1" )
if name.startswith("norm" ):
a = "bit." + name
if "bit" not in name and "classifier" not in name:
a = "bit.encoder." + name
return name
def __magic_name__ ( ):
'''simple docstring'''
a = "http://images.cocodataset.org/val2017/000000039769.jpg"
a = Image.open(requests.get(A, stream=A ).raw )
return im
@torch.no_grad()
def __magic_name__ ( A : Optional[int], A : int, A : int=False ):
'''simple docstring'''
a = get_config(A )
# load original model from timm
a = create_model(A, pretrained=A )
timm_model.eval()
# load state_dict of original model
a = timm_model.state_dict()
for key in state_dict.copy().keys():
a = state_dict.pop(A )
a = val.squeeze() if "head" in key else val
# load HuggingFace model
a = BitForImageClassification(A )
model.eval()
model.load_state_dict(A )
# create image processor
a = create_transform(**resolve_data_config({}, model=A ) )
a = transform.transforms
a = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
a = BitImageProcessor(
do_resize=A, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=A, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=A, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
a = prepare_img()
a = transform(A ).unsqueeze(0 )
a = processor(A, return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(A, A )
# verify logits
with torch.no_grad():
a = model(A )
a = outputs.logits
print("Logits:", logits[0, :3] )
print("Predicted class:", model.config.idalabel[logits.argmax(-1 ).item()] )
a = timm_model(A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A, outputs.logits, atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(A ).mkdir(exist_ok=A )
print(F"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
processor.save_pretrained(A )
if push_to_hub:
print(F"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(F"""ybelkada/{model_name}""" )
processor.push_to_hub(F"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__lowerCAmelCase : int = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 662 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
a = parser.parse_args()
return args
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
def fn(A : Tuple ):
return tokenizer(examples["text"] )
return fn
def __magic_name__ ( A : Any ):
'''simple docstring'''
a = []
for i in range(len(tokenized_data["input_ids"] ) ):
a = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
a = tf.train.Features(feature=A )
a = tf.train.Example(features=A )
a = example.SerializeToString()
records.append(A )
return records
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
a = min(len(A ), args.limit )
a = dataset.select(range(A ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a = os.path.join(args.output_dir, args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
a = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
a = tokenize_function(A )
a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A : List[Any] ):
# Concatenate all texts.
a = {k: sum(examples[k], [] ) for k in examples.keys()}
a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a = {
k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 )
a = 0
a = 0
for shard in range(0, len(A ), args.shard_size ):
a = grouped_dataset[shard : shard + args.shard_size]
a = len(dataset_snapshot["input_ids"] )
a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
a = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
a = serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A, A ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
print(F"""Total {args.split} records: {total_records}""", file=A )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = parse_args()
main(args)
| 662 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __magic_name__ ( ):
'''simple docstring'''
a = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]" )
a = parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(A )
DownloadCommand.register_subcommand(A )
EnvironmentCommand.register_subcommand(A )
RunCommand.register_subcommand(A )
ServeCommand.register_subcommand(A )
UserCommands.register_subcommand(A )
AddNewModelCommand.register_subcommand(A )
AddNewModelLikeCommand.register_subcommand(A )
LfsCommands.register_subcommand(A )
PTtoTFCommand.register_subcommand(A )
# Let's go
a = parser.parse_args()
if not hasattr(A, "func" ):
parser.print_help()
exit(1 )
# Run
a = args.func(A )
service.run()
if __name__ == "__main__":
main()
| 662 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
a = {}
a = tokenizer(example["content"], truncation=A )["input_ids"]
a = len(example["content"] ) / len(output["input_ids"] )
return output
__lowerCAmelCase : Dict = HfArgumentParser(PretokenizationArguments)
__lowerCAmelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase : List[Any] = multiprocessing.cpu_count()
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__lowerCAmelCase : List[Any] = time.time()
__lowerCAmelCase : str = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : int = time.time()
__lowerCAmelCase : Optional[int] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 662 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCamelCase , "tf_padding" ) )
self.parent.assertTrue(hasattr(__lowerCamelCase , "depth_multiplier" ) )
class snake_case__ :
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any=13 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Optional[int]=32 , __lowerCamelCase : Dict=0.25 , __lowerCamelCase : List[str]=8 , __lowerCamelCase : Any=True , __lowerCamelCase : int=10_24 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : Optional[Any]="relu6" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str=10 , __lowerCamelCase : Any=None , ) -> Tuple:
a = parent
a = batch_size
a = num_channels
a = image_size
a = depth_multiplier
a = min_depth
a = tf_padding
a = int(last_hidden_size * depth_multiplier )
a = output_stride
a = hidden_act
a = classifier_dropout_prob
a = use_labels
a = is_training
a = num_labels
a = initializer_range
a = scope
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.num_labels )
a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ) -> List[str]:
a = MobileNetVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : str ) -> Union[str, Any]:
a = self.num_labels
a = MobileNetVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : str ) -> Any:
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ (_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Any = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a = MobileNetVaModelTester(self )
a = MobileNetVaConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def __UpperCAmelCase ( self : Any ) -> List[str]:
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def __UpperCAmelCase ( self : List[Any] ) -> int:
pass
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCamelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> int:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
def check_hidden_states_output(__lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ):
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a = outputs.hidden_states
a = 26
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = MobileNetVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __magic_name__ ( ):
'''simple docstring'''
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : List[str] ) -> Any:
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def __UpperCAmelCase ( self : str ) -> Dict:
a = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(__lowerCamelCase )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
a = model(**__lowerCamelCase )
# verify the logits
a = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
a = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 662 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]:
a = tokenizer
a = dataset
a = len(__lowerCamelCase ) if n_tasks is None else n_tasks
a = n_copies
def __iter__( self : Tuple ) -> str:
a = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]:
a = start_length
a = eof_strings
a = tokenizer
def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__lowerCamelCase )
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
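    # Split the completion on the EOF stop strings and drop the final stop marker together with anything generated after it.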
a = re.split("(%s)" % "|".join(A ), A )
# last string should be ""
return "".join(string_list[:-2] )
def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ):
'''simple docstring'''
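    # Generate batch_size candidate completions per task, gather them across processes, then regroup the decoded code by task id.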
a = defaultdict(A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(A ) ):
with torch.no_grad():
a = batch["ids"].shape[-1]
a = accelerator.unwrap_model(A ).generate(
input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A )
# each task is generated batch_size times
a = batch["task_id"].repeat(A )
a = accelerator.pad_across_processes(
A, dim=1, pad_index=tokenizer.pad_token_id )
a , a = accelerator.gather((generated_tokens, generated_tasks) )
a = generated_tokens.cpu().numpy()
a = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(A, A ):
gen_token_dict[task].append(A )
a = [[] for _ in range(A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A )
code_gens[task].append(remove_last_block(A ) )
return code_gens
def __magic_name__ ( ):
'''simple docstring'''
a = HfArgumentParser(A )
a = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a = "false"
if args.num_workers is None:
a = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a = Accelerator()
set_seed(args.seed, device_specific=A )
# Load model and tokenizer
a = AutoTokenizer.from_pretrained(args.model_ckpt )
a = tokenizer.eos_token
a = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ),
}
# Load evaluation dataset and metric
a = load_dataset("openai_humaneval" )
a = load_metric("code_eval" )
a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
a = args.n_samples // args.batch_size
a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a = DataLoader(A, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a = code_eval_metric.compute(references=[""], predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
a , a = accelerator.prepare(A, A )
a = complete_code(
A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, )
if accelerator.is_main_process:
a = []
for task in tqdm(range(A ) ):
a = human_eval["test"][task]["test"]
a = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
a , a = code_eval_metric.compute(
references=A, predictions=A, num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file, "w" ) as fp:
json.dump(A, A )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 662 | 1 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __magic_name__ ( ):
'''simple docstring'''
a = [randint(-1000, 1000 ) for i in range(10 )]
a = randint(-5000, 5000 )
return (arr, r)
__lowerCAmelCase : Any = make_dataset()
def __magic_name__ ( A : list[int], A : int ):
'''simple docstring'''
for triplet in permutations(A, 3 ):
if sum(A ) == target:
return tuple(sorted(A ) )
return (0, 0, 0)
def __magic_name__ ( A : list[int], A : int ):
'''simple docstring'''
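    # Sort once, then use a two-pointer scan for each anchor element: O(n^2) instead of the O(n^3) permutation search above.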
arr.sort()
a = len(A )
for i in range(n - 1 ):
a , a = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __magic_name__ ( ):
'''simple docstring'''
a = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
a = "\ntriplet_sum1(*dataset)\n"
a = "\ntriplet_sum2(*dataset)\n"
a = repeat(setup=A, stmt=A, repeat=5, number=10000 )
a = repeat(setup=A, stmt=A, repeat=5, number=10000 )
return (min(A ), min(A ))
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowerCAmelCase : Optional[int] = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 662 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 1 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__lowerCAmelCase : List[str] = object()
# For specifying empty leaf dict `{}`
__lowerCAmelCase : str = object()
def __magic_name__ ( A : Any, A : Dict ):
'''simple docstring'''
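    # True when the sequence of regex patterns qs matches some contiguous window of the key tuple ks.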
a = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(A ) - len(A ) + 1 ):
a = [x.match(A ) for x, y in zip(A, ks[i:] )]
if matches and all(A ):
return True
return False
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
def replace(A : Union[str, Any], A : List[str] ):
for rule, replacement in rules:
if _match(A, A ):
return replacement
return val
return replace
def __magic_name__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp", A )),
(("transformer", "wte", "embedding"), P("mp", A )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(A, "mp" )),
(("attention", "out_proj", "kernel"), P("mp", A )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(A, "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp", A )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __magic_name__ ( A : Any ):
'''simple docstring'''
a = _get_partition_rules()
a = _replacement_rules(A )
a = {k: _unmatched for k in flatten_dict(A )}
a = {k: replace(A, A ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(A ) )
| 662 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 1 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__lowerCAmelCase : Dict = 'src/transformers'
__lowerCAmelCase : Optional[Any] = 'docs/source/en/tasks'
def __magic_name__ ( A : str, A : Dict, A : str ):
'''simple docstring'''
with open(A, "r", encoding="utf-8", newline="\n" ) as f:
a = f.readlines()
# Find the start prompt.
a = 0
while not lines[start_index].startswith(A ):
start_index += 1
start_index += 1
a = start_index
while not lines[end_index].startswith(A ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : Any = direct_transformers_import(TRANSFORMERS_PATH)
__lowerCAmelCase : int = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__lowerCAmelCase : Dict = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def __magic_name__ ( A : str ):
'''simple docstring'''
a = TASK_GUIDE_TO_MODELS[task_guide]
a = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(A, set() )
a = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
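    # Produces one Markdown link per supported model, e.g. "[ALBERT](../model_doc/albert), [BART](../model_doc/bart), ...\n"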
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def __magic_name__ ( A : Optional[int], A : Optional[int]=False ):
'''simple docstring'''
a , a , a , a = _find_text_in_file(
filename=os.path.join(A, A ), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", )
a = get_model_list_for_task(A )
if current_list != new_list:
if overwrite:
with open(os.path.join(A, A ), "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
" to fix this." )
if __name__ == "__main__":
__lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__lowerCAmelCase : Tuple = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__lowerCAmelCase : int = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 662 | 1 |
from __future__ import annotations
from collections import deque
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[str] ) -> Optional[Any]:
a = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(__lowerCamelCase )
self.set_fail_transitions()
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : str ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str ) -> None:
a = 0
for character in keyword:
a = self.find_next_state(__lowerCamelCase , __lowerCamelCase )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
a = len(self.adlist ) - 1
else:
a = next_state
self.adlist[current_state]["output"].append(__lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> None:
a = deque()
for node in self.adlist[0]["next_states"]:
q.append(__lowerCamelCase )
a = 0
while q:
a = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(__lowerCamelCase )
a = self.adlist[r]["fail_state"]
while (
self.find_next_state(__lowerCamelCase , self.adlist[child]["value"] ) is None
and state != 0
):
a = self.adlist[state]["fail_state"]
a = self.find_next_state(
__lowerCamelCase , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
a = 0
a = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str ) -> dict[str, list[int]]:
        a = {} # maps each found keyword to the list of start indices of its occurrences

a = 0
for i in range(len(__lowerCamelCase ) ):
while (
self.find_next_state(__lowerCamelCase , string[i] ) is None
and current_state != 0
):
a = self.adlist[current_state]["fail_state"]
a = self.find_next_state(__lowerCamelCase , string[i] )
if next_state is None:
a = 0
else:
a = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
a = []
result[key].append(i - len(__lowerCamelCase ) + 1 )
return result
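# Hedged usage sketch (not part of the original sample): with the original,
# un-obfuscated method names, this Aho-Corasick automaton maps each keyword to
# the start indices of its matches, e.g. for keywords ["what", "hat", "ver", "er"]
# searched in "whatever, err ... , wherever" the expected result is
# {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}.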
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : Dict = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = """gpt_neox"""
def __init__( self : Dict , __lowerCamelCase : Dict=5_04_32 , __lowerCamelCase : Any=61_44 , __lowerCamelCase : Dict=44 , __lowerCamelCase : List[str]=64 , __lowerCamelCase : List[str]=2_45_76 , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : str=0.25 , __lowerCamelCase : int=1_00_00 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=20_48 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : str=1e-5 , __lowerCamelCase : List[str]=True , __lowerCamelCase : int=0 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : List[str]=False , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , **__lowerCamelCase : Dict , ) -> Optional[Any]:
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
a = vocab_size
a = max_position_embeddings
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = rotary_pct
a = rotary_emb_base
a = attention_dropout
a = hidden_dropout
a = classifier_dropout
a = initializer_range
a = layer_norm_eps
a = use_cache
a = tie_word_embeddings
a = use_parallel_residual
a = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowerCamelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
f"""got {self.rope_scaling}""" )
a = self.rope_scaling.get("type" , __lowerCamelCase )
a = self.rope_scaling.get("factor" , __lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__lowerCamelCase , __lowerCamelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
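# Hedged illustration (not part of the original sample): the schema enforced by
# the rope-scaling validation above is a two-field dict whose "type" is one of
# "linear" or "dynamic" and whose "factor" is a float greater than 1, e.g.
_EXAMPLE_ROPE_SCALING = {"type": "linear", "factor": 2.0}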
| 662 |
import math
import flax.linen as nn
import jax.numpy as jnp
def __magic_name__ ( A : jnp.ndarray, A : int, A : float = 1, A : float = 1, A : float = 1.0E4, A : bool = False, A : float = 1.0, ):
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
a = float(embedding_dim // 2 )
a = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
a = min_timescale * jnp.exp(jnp.arange(A, dtype=jnp.floataa ) * -log_timescale_increment )
a = jnp.expand_dims(A, 1 ) * jnp.expand_dims(A, 0 )
# scale embeddings
a = scale * emb
if flip_sin_to_cos:
a = jnp.concatenate([jnp.cos(A ), jnp.sin(A )], axis=1 )
else:
a = jnp.concatenate([jnp.sin(A ), jnp.cos(A )], axis=1 )
a = jnp.reshape(A, [jnp.shape(A )[0], embedding_dim] )
return signal
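# Hedged numerical sketch (not part of the original sample; reuses the `math` and
# `jnp` imports above): with the default min/max timescales of 1 and 1e4, a
# frequency shift of 1 and no sin/cos flipping, each timestep t is mapped to
# [sin(t * f_0), ..., sin(t * f_{D/2-1}), cos(t * f_0), ..., cos(t * f_{D/2-1})]
# where f_k = exp(-k * log(1e4) / (D/2 - 1)). A direct re-derivation for D=4, t=2:
_demo_half_dim = 2
_demo_freqs = jnp.exp(jnp.arange(_demo_half_dim) * -(math.log(1.0e4) / (_demo_half_dim - 1)))
_demo_row = jnp.concatenate([jnp.sin(2.0 * _demo_freqs), jnp.cos(2.0 * _demo_freqs)])
# _demo_row has shape (4,): one embedding_dim=4 row for a single timestep.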
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCamelCase )
a = nn.silu(__lowerCamelCase )
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCamelCase )
return temb
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : float = 1
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]:
return get_sinusoidal_embeddings(
__lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 662 | 1 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : int=3 , __lowerCamelCase : List[str]=10 , __lowerCamelCase : Dict=[10, 20, 30, 40] , __lowerCamelCase : Union[str, Any]=[1, 1, 2, 1] , __lowerCamelCase : int=True , __lowerCamelCase : int=True , __lowerCamelCase : Any="relu" , __lowerCamelCase : List[str]=3 , __lowerCamelCase : str=None , ) -> Optional[int]:
a = parent
a = batch_size
a = image_size
a = num_channels
a = embeddings_size
a = hidden_sizes
a = depths
a = is_training
a = use_labels
a = hidden_act
a = num_labels
a = scope
a = len(__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.num_labels )
a = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Dict:
a = RegNetModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ) -> Dict:
a = self.num_labels
a = RegNetForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : Dict ) -> Dict:
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ (_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Optional[int] = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
a = RegNetModelTester(self )
a = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> str:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def __UpperCAmelCase ( self : Dict ) -> Any:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def __UpperCAmelCase ( self : Dict ) -> int:
pass
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCamelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> Dict:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
def check_hidden_states_output(__lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ):
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
a = layer_type
a = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = RegNetModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __magic_name__ ( ):
'''simple docstring'''
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : Any ) -> int:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
a = model(**__lowerCamelCase )
# verify the logits
a = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
a = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 662 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 662 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
a = do_rescale
a = rescale_factor
a = do_pad
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]:
if not batched:
a = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size["shortest_edge"] * h / w )
a = self.size["shortest_edge"]
elif w > h:
a = self.size["shortest_edge"]
a = int(self.size["shortest_edge"] * w / h )
else:
a = self.size["shortest_edge"]
a = self.size["shortest_edge"]
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 662 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = fname.split(os.path.sep )[-1]
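    # e.g. "Abyssinian_12.jpg" -> "Abyssinian": the label is everything before the trailing "_<digits>.jpg"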
return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0]
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
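        # gather_for_metrics gathers predictions and labels across processes, dropping duplicate samples padded onto the last batch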
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
    parser.add_argument(
        "--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=A, default="logs", help="Location to store experiment tracking logs and relevant project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
| 662 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : Dict = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__lowerCAmelCase : List[Any] = {'mobilebert-uncased': 512}
__lowerCAmelCase : Any = {}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[Any] = MobileBertTokenizer
def __init__( self : int , __lowerCamelCase : Tuple=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple="[UNK]" , __lowerCamelCase : Optional[Any]="[SEP]" , __lowerCamelCase : Union[str, Any]="[PAD]" , __lowerCamelCase : int="[CLS]" , __lowerCamelCase : List[str]="[MASK]" , __lowerCamelCase : Dict=True , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[Any] , ) -> List[str]:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __lowerCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , __lowerCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __lowerCamelCase ) != tokenize_chinese_chars
):
a = getattr(__lowerCamelCase , normalizer_state.pop("type" ) )
a = do_lower_case
a = strip_accents
a = tokenize_chinese_chars
a = normalizer_class(**__lowerCamelCase )
a = do_lower_case
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : List[str]=None ) -> int:
a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
a = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
| 662 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
__lowerCAmelCase : Dict = '|'.join(sys.argv[1:])
__lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 662 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : Optional[Any] = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : Optional[int] = {
'distilbert-base-uncased': 512,
'distilbert-base-uncased-distilled-squad': 512,
'distilbert-base-cased': 512,
'distilbert-base-cased-distilled-squad': 512,
'distilbert-base-german-cased': 512,
'distilbert-base-multilingual-cased': 512,
}
__lowerCAmelCase : List[str] = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : str = DistilBertTokenizer
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str=True , __lowerCamelCase : List[str]="[UNK]" , __lowerCamelCase : str="[SEP]" , __lowerCamelCase : Tuple="[PAD]" , __lowerCamelCase : List[str]="[CLS]" , __lowerCamelCase : int="[MASK]" , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Any=None , **__lowerCamelCase : str , ) -> List[str]:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __lowerCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , __lowerCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __lowerCamelCase ) != tokenize_chinese_chars
):
a = getattr(__lowerCamelCase , normalizer_state.pop("type" ) )
a = do_lower_case
a = strip_accents
a = tokenize_chinese_chars
a = normalizer_class(**__lowerCamelCase )
a = do_lower_case
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[Any]=None ) -> int:
a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : int , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
a = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
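# --- Editor's sketch of the special-token layout built above (illustrative) ---
# A single sequence A becomes [CLS] A [SEP] with token_type_ids of all zeros;
# a pair (A, B) becomes [CLS] A [SEP] B [SEP] with zeros for the first segment
# and ones for the second. The toy ids below are hypothetical placeholders.
_cls, _sep = [101], [102]
_seq_a, _seq_b = [7, 8, 9], [11, 12]
assert len(_cls + _seq_a + _sep) * [0] + len(_seq_b + _sep) * [1] == [0, 0, 0, 0, 0, 1, 1, 1]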
| 662 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive square-and-multiply modular exponentiation."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the power tower base^base^...^base of height `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
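# --- Editor's sanity check (illustrative) -------------------------------------
# _modexpt is plain square-and-multiply modular exponentiation, so it should
# agree with Python's built-in three-argument pow for the default inputs.
assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)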
| 662 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
"""simple docstring"""
def __UpperCAmelCase ( self : List[Any] ) -> int:
a = tempfile.mkdtemp()
a = 5
# Realm tok
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
a = os.path.join(__lowerCamelCase , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __UpperCAmelCase ( self : str ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
a = RealmConfig(num_block_records=self.num_block_records )
return config
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
a = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __UpperCAmelCase ( self : int ) -> Any:
a = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
] , dtype=__lowerCamelCase , )
return block_records
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
a = self.get_config()
a = self.get_dummy_retriever()
a = retriever.tokenizer
a = np.array([0, 3] , dtype="long" )
a = tokenizer(["Test question"] ).input_ids
a = tokenizer(
["the fourth"] , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ).input_ids
a = config.reader_seq_len
a , a , a , a = retriever(
__lowerCamelCase , __lowerCamelCase , answer_ids=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors="np" )
self.assertEqual(len(__lowerCamelCase ) , 2 )
self.assertEqual(len(__lowerCamelCase ) , 2 )
self.assertEqual(len(__lowerCamelCase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_config()
a = self.get_dummy_retriever()
a = retriever.tokenizer
a = np.array([0, 3, 5] , dtype="long" )
a = tokenizer(["Test question"] ).input_ids
a = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ).input_ids
a = config.reader_seq_len
a , a , a , a = retriever(
__lowerCamelCase , __lowerCamelCase , answer_ids=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors="np" )
self.assertEqual([False, True, True] , __lowerCamelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __lowerCamelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
a = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
a = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
a = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
| 662 |
def jaro_winkler(str_a: str, str_b: str) -> float:
    """Jaro-Winkler similarity of two strings (1.0 means identical)."""
    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)
    # matching characters
    matching_a = get_matched_characters(str_a, str_b)
    matching_b = get_matched_characters(str_b, str_a)
    match_count = len(matching_a)
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a)
                + match_count / len(str_b)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str_a[:4], str_b[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
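# --- Editor's sanity check (illustrative) -------------------------------------
# Identical strings score 1.0, and the classic transposition pair
# "martha" / "marhta" lands near 0.961 (three matching prefix characters).
assert abs(jaro_winkler("martha", "martha") - 1.0) < 1e-9
assert abs(jaro_winkler("martha", "marhta") - 0.9611) < 1e-3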
| 662 | 1 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]=13 , __lowerCamelCase : Tuple=7 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : Optional[int]=5 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Any=37 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=5_12 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Dict=4 , ) -> Union[str, Any]:
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_attention_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_choices
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_attention_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : str ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = True
a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : str = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : Dict ) -> Dict:
a = FlaxRobertaModelTester(self )
@slow
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
a = model_class_name.from_pretrained("roberta-base" , from_pt=__lowerCamelCase )
a = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
| 662 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    """Sum of the fifth powers of the decimal digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    """Sum of all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number))
if __name__ == "__main__":
print(solution())
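# --- Editor's example (illustrative) -------------------------------------------
# 4150 = 4**5 + 1**5 + 5**5 + 0**5, so it is one of the numbers counted above.
assert digits_fifth_powers_sum(4150) == 4150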
| 662 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : Optional[int]=7 , __lowerCamelCase : List[str]=True , __lowerCamelCase : Any=True , __lowerCamelCase : str=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Any=99 , __lowerCamelCase : Optional[Any]=24 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Any=37 , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Union[str, Any]=5_12 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : str=0.02 , __lowerCamelCase : str=3 , __lowerCamelCase : str=None , __lowerCamelCase : Tuple=10_00 , ) -> List[Any]:
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = scope
a = range_bbox
def __UpperCAmelCase ( self : Any ) -> List[Any]:
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a = bbox[i, j, 3]
a = bbox[i, j, 1]
a = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a = bbox[i, j, 2]
a = bbox[i, j, 0]
a = t
a = None
if self.use_input_mask:
a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCAmelCase ( self : List[str] ) -> Dict:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , ) -> Dict:
a = LiltModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , bbox=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
a = model(__lowerCamelCase , bbox=__lowerCamelCase , token_type_ids=__lowerCamelCase )
a = model(__lowerCamelCase , bbox=__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = self.num_labels
a = LiltForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(
__lowerCamelCase , bbox=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Dict , ) -> Tuple:
a = LiltForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(
__lowerCamelCase , bbox=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : str ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : str = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ) -> List[str]:
return True
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
a = LiltModelTester(self )
a = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
@slow
def __UpperCAmelCase ( self : Any ) -> Any:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = LiltModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
@slow
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Any ) -> Tuple:
a = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(__lowerCamelCase )
a = torch.tensor([[1, 2]] , device=__lowerCamelCase )
a = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__lowerCamelCase )
# forward pass
with torch.no_grad():
a = model(input_ids=__lowerCamelCase , bbox=__lowerCamelCase )
a = torch.Size([1, 2, 7_68] )
a = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__lowerCamelCase , )
self.assertTrue(outputs.last_hidden_state.shape , __lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __lowerCamelCase , atol=1e-3 ) )
| 662 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
a = do_rescale
a = rescale_factor
a = do_pad
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]:
if not batched:
a = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size["shortest_edge"] * h / w )
a = self.size["shortest_edge"]
elif w > h:
a = self.size["shortest_edge"]
a = int(self.size["shortest_edge"] * w / h )
else:
a = self.size["shortest_edge"]
a = self.size["shortest_edge"]
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 662 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
__lowerCAmelCase : Tuple = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
__lowerCAmelCase : Any = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig(PretrainedConfig):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = """whisper"""
SCREAMING_SNAKE_CASE_ : str = ["""past_key_values"""]
SCREAMING_SNAKE_CASE_ : Dict = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Tuple , __lowerCamelCase : Optional[Any]=5_18_65 , __lowerCamelCase : Optional[int]=80 , __lowerCamelCase : int=6 , __lowerCamelCase : Dict=4 , __lowerCamelCase : List[Any]=6 , __lowerCamelCase : Any=4 , __lowerCamelCase : List[str]=15_36 , __lowerCamelCase : Union[str, Any]=15_36 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : List[str]=0.0 , __lowerCamelCase : Dict=5_02_57 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : Any=2_56 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : str=False , __lowerCamelCase : Union[str, Any]=15_00 , __lowerCamelCase : Optional[Any]=4_48 , __lowerCamelCase : Tuple=5_02_56 , __lowerCamelCase : Optional[int]=5_02_56 , __lowerCamelCase : Optional[int]=5_02_56 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=[2_20, 5_02_56] , __lowerCamelCase : str=False , __lowerCamelCase : Dict=2_56 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0.05 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : Any=10 , __lowerCamelCase : List[str]=0 , __lowerCamelCase : Optional[int]=7 , **__lowerCamelCase : int , ) -> Any:
a = vocab_size
a = num_mel_bins
a = d_model
a = encoder_layers
a = encoder_attention_heads
a = decoder_layers
a = decoder_attention_heads
a = decoder_ffn_dim
a = encoder_ffn_dim
a = dropout
a = attention_dropout
a = activation_dropout
a = activation_function
a = init_std
a = encoder_layerdrop
a = decoder_layerdrop
a = use_cache
a = encoder_layers
a = scale_embedding # scale factor will be sqrt(d_model) if True
a = max_source_positions
a = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
a = classifier_proj_size
a = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a = apply_spec_augment
a = mask_time_prob
a = mask_time_length
a = mask_time_min_masks
a = mask_feature_prob
a = mask_feature_length
a = mask_feature_min_masks
a = median_filter_width
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , suppress_tokens=__lowerCamelCase , begin_suppress_tokens=__lowerCamelCase , **__lowerCamelCase , )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
a = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
a = {0: "batch"}
else:
a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
return common_inputs
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 2_20_50 , __lowerCamelCase : float = 5.0 , __lowerCamelCase : int = 2_20 , ) -> Mapping[str, Any]:
a = OrderedDict()
a = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=__lowerCamelCase , framework=__lowerCamelCase , sampling_rate=__lowerCamelCase , time_duration=__lowerCamelCase , frequency=__lowerCamelCase , )
a = encoder_inputs["input_features"].shape[2]
a = encoder_sequence_length // 2 if self.use_past else seq_length
a = super().generate_dummy_inputs(
preprocessor.tokenizer , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a = encoder_inputs.pop("input_features" )
a = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
a = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def __UpperCAmelCase ( self : List[Any] ) -> float:
return 1e-3
| 662 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with alternating forward and backward bubble passes."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
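# --- Editor's sanity check (illustrative) -------------------------------------
# The alternating forward/backward passes should fully sort these examples.
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) == [-4, 0, 1, 2, 5, 11]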
| 662 | 1 |
import copy
import re
class TrialShortNamer:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = """hp"""
SCREAMING_SNAKE_CASE_ : str = {}
SCREAMING_SNAKE_CASE_ : Dict = None
@classmethod
def __UpperCAmelCase ( cls : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] ) -> Dict:
a = prefix
a = defaults
cls.build_naming_info()
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
if len(__lowerCamelCase ) == 0:
return ""
a = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__lowerCamelCase ) + 1 ):
a = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
a = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__lowerCamelCase : Union[str, Any] ):
a = ""
while integer != 0:
a = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
a = 0
while True:
a = word + "#" + int_to_alphabetic(__lowerCamelCase )
if sword in info["reverse_short_word"]:
continue
else:
a = sword
break
a = short_word
a = word
return short_word
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] ) -> List[Any]:
a = param_name.split("_" )
a = [TrialShortNamer.shortname_for_word(__lowerCamelCase , __lowerCamelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
a = ["", "_"]
for separator in separators:
a = separator.join(__lowerCamelCase )
if shortname not in info["reverse_short_param"]:
a = shortname
a = param_name
return shortname
return param_name
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : str ) -> Optional[int]:
a = TrialShortNamer.shortname_for_key(__lowerCamelCase , __lowerCamelCase )
a = short_name
a = param_name
@classmethod
def __UpperCAmelCase ( cls : str ) -> Union[str, Any]:
if cls.NAMING_INFO is not None:
return
a = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
a = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__lowerCamelCase , __lowerCamelCase )
a = info
@classmethod
def __UpperCAmelCase ( cls : List[str] , __lowerCamelCase : Any ) -> Any:
cls.build_naming_info()
assert cls.PREFIX is not None
a = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
a = cls.NAMING_INFO["short_param"][k]
if isinstance(__lowerCamelCase , __lowerCamelCase ):
a = 1 if v else 0
a = "" if isinstance(__lowerCamelCase , (int, float) ) else "-"
a = f"""{key}{sep}{v}"""
name.append(__lowerCamelCase )
return "_".join(__lowerCamelCase )
@classmethod
def __UpperCAmelCase ( cls : Dict , __lowerCamelCase : Optional[Any] ) -> Tuple:
a = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
a = []
else:
a = repr.split("_" )
a = {}
for value in values:
if "-" in value:
a , a = value.split("-" )
else:
a = re.sub("[0-9.]" , "" , __lowerCamelCase )
a = float(re.sub("[^0-9.]" , "" , __lowerCamelCase ) )
a = cls.NAMING_INFO["reverse_short_param"][p_k]
a = p_v
for k in cls.DEFAULTS:
if k not in parameters:
a = cls.DEFAULTS[k]
return parameters
| 662 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase )
}
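# --- Editor's usage sketch (illustrative) --------------------------------------
# The metric above is a thin wrapper around nltk's corpus_gleu; called directly
# on an exact hypothesis/reference match it returns 1.0.
_hyp = ["the", "cat", "sat"]
_ref = ["the", "cat", "sat"]
assert gleu_score.corpus_gleu(list_of_references=[[_ref]], hypotheses=[_hyp]) == 1.0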
| 662 | 1 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Build sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCamelCase )
a = nn.silu(__lowerCamelCase )
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCamelCase )
return temb
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : float = 1
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]:
return get_sinusoidal_embeddings(
__lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
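# --- Editor's usage sketch (illustrative) --------------------------------------
# For a 1-d batch of four timesteps and an even embedding width, the helper
# above returns interleaved sin/cos features of shape (4, 32).
_timesteps = jnp.arange(4)
assert get_sinusoidal_embeddings(_timesteps, embedding_dim=32).shape == (4, 32)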
| 662 |
import argparse
import os
import re
__lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __magic_name__ ( A : int, A : bool = False ):
'''simple docstring'''
with open(A, "r", encoding="utf-8" ) as f:
a = f.read()
a = content.split("\n" )
a = []
a = 0
while line_idx < len(A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(A, "w", encoding="utf-8" ) as f:
f.write("\n".join(A ) )
elif "\n".join(A ) != content:
return True
def __magic_name__ ( A : bool = False ):
'''simple docstring'''
a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )]
a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames]
if not overwrite and any(A ):
a = [f for f, d in zip(A, A ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 662 | 1 |
def and_gate(input_a: int, input_b: int) -> int:
    """Return 1 only when both inputs are 1 (logical AND)."""
    return int((input_a, input_b).count(0) == 0)
def test_and_gate() -> None:
'''simple docstring'''
assert and_gate(0, 0 ) == 0
assert and_gate(0, 1 ) == 0
assert and_gate(1, 0 ) == 0
assert and_gate(1, 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 662 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
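# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It goes through
# the public `transformers` entry point rather than this file directly, and it
# needs network access to fetch the pretrained sentencepiece model.
def _usage_example():
    from transformers import ReformerTokenizer

    tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tok.encode("Crime and Punishment is a novel by Fyodor Dostoevsky.")
    print(ids)
    print(tok.decode(ids))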
| 662 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case__ (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = StableUnCLIPPipeline
SCREAMING_SNAKE_CASE_ : int = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : Any = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
SCREAMING_SNAKE_CASE_ : List[str] = False
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = 32
a = embedder_hidden_size
# prior components
torch.manual_seed(0 )
a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
a = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCamelCase , projection_dim=__lowerCamelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
a = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCamelCase , num_layers=1 , )
torch.manual_seed(0 )
a = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=__lowerCamelCase , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
a = StableUnCLIPImageNormalizer(embedding_dim=__lowerCamelCase )
a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCamelCase , layers_per_block=1 , upcast_attention=__lowerCamelCase , use_linear_projection=__lowerCamelCase , )
torch.manual_seed(0 )
a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=__lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
a = AutoencoderKL()
a = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]=0 ) -> Any:
if str(__lowerCamelCase ).startswith("mps" ):
a = torch.manual_seed(__lowerCamelCase )
else:
a = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
a = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCAmelCase ( self : int ) -> int:
a = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
a = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=__lowerCamelCase )
@slow
@require_torch_gpu
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Any ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        a = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a = torch.Generator(device="cpu" ).manual_seed(0 )
a = pipe("anime turle" , generator=__lowerCamelCase , output_type="np" )
a = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        a = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
a = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
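# ---------------------------------------------------------------------------
# Illustrative end-user sketch (not part of the test module): outside the test
# harness, the pipeline exercised above is typically driven like this. It needs
# a CUDA GPU and downloads the stable-unclip weights from the Hub.
def _manual_run_example():
    pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = pipe("anime turtle", prior_num_inference_steps=25, num_inference_steps=25).images[0]
    image.save("anime_turtle.png")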
| 662 |
from __future__ import annotations
import time
import numpy as np
__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
return {self.__need().index(__lowerCamelCase ): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A pile of cards, ordered by the value of its top (last) element."""
    def __lt__(self, other):
        return self[-1] < other[-1]
    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    """Sort `collection` in place with patience sorting and return it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks, new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(patience_sort(unsorted))
| 662 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset)
def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ):
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A, info=A, split=A, axis=A )
else:
return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
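# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It exercises the
# public `datasets` API that these helpers implement, with tiny in-memory
# datasets made up for the example.
def _usage_example():
    from datasets import Dataset, concatenate_datasets, interleave_datasets

    d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
    d2 = Dataset.from_dict({"text": ["x", "y"]})

    # Rows alternate between the sources until the smaller one is exhausted.
    print(interleave_datasets([d1, d2], stopping_strategy="first_exhausted")["text"])

    # Rows of both sources are stacked back to back.
    print(concatenate_datasets([d1, d2])["text"])  # ['a', 'b', 'c', 'x', 'y']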
| 662 | 1 |
class snake_case__ :
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=None ) -> List[str]:
a = data
a = previous
a = next_node
def __str__( self : Dict ) -> str:
return f"""{self.data}"""
def __UpperCAmelCase ( self : Tuple ) -> int:
return self.data
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
return self.next
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.previous
class snake_case__ :
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Any ) -> List[str]:
a = head
def __iter__( self : List[str] ) -> List[str]:
return self
def __UpperCAmelCase ( self : int ) -> List[str]:
if not self.current:
raise StopIteration
else:
a = self.current.get_data()
a = self.current.get_next()
return value
class snake_case__ :
"""simple docstring"""
def __init__( self : str ) -> List[str]:
a = None # First node in list
a = None # Last node in list
def __str__( self : str ) -> Optional[int]:
a = self.head
a = []
while current is not None:
nodes.append(current.get_data() )
a = current.get_next()
return " ".join(str(__lowerCamelCase ) for node in nodes )
def __contains__( self : Optional[Any] , __lowerCamelCase : int ) -> Optional[int]:
a = self.head
while current:
if current.get_data() == value:
return True
a = current.get_next()
return False
def __iter__( self : Dict ) -> List[Any]:
return LinkedListIterator(self.head )
def __UpperCAmelCase ( self : Dict ) -> Any:
if self.head:
return self.head.get_data()
return None
def __UpperCAmelCase ( self : List[Any] ) -> int:
if self.tail:
return self.tail.get_data()
return None
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Node ) -> None:
if self.head is None:
a = node
a = node
else:
self.insert_before_node(self.head , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Node ) -> None:
if self.head is None:
self.set_head(__lowerCamelCase )
else:
self.insert_after_node(self.tail , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : int ) -> None:
a = Node(__lowerCamelCase )
if self.head is None:
self.set_head(__lowerCamelCase )
else:
self.set_tail(__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Node , __lowerCamelCase : Node ) -> None:
a = node
a = node.previous
if node.get_previous() is None:
a = node_to_insert
else:
a = node_to_insert
a = node_to_insert
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Node , __lowerCamelCase : Node ) -> None:
a = node
a = node.next
if node.get_next() is None:
a = node_to_insert
else:
a = node_to_insert
a = node_to_insert
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int ) -> None:
a = 1
a = Node(__lowerCamelCase )
a = self.head
while node:
if current_position == position:
self.insert_before_node(__lowerCamelCase , __lowerCamelCase )
return
current_position += 1
a = node.next
self.insert_after_node(self.tail , __lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int ) -> Node:
a = self.head
while node:
if node.get_data() == item:
return node
a = node.get_next()
raise Exception("Node not found" )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> str:
if (node := self.get_node(__lowerCamelCase )) is not None:
if node == self.head:
a = self.head.get_next()
if node == self.tail:
a = self.tail.get_previous()
self.remove_node_pointers(__lowerCamelCase )
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Node ) -> None:
if node.get_next():
a = node.previous
if node.get_previous():
a = node.next
a = None
a = None
def __UpperCAmelCase ( self : Tuple ) -> str:
return self.head is None
def __magic_name__ ( ):
    '''simple docstring'''
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 662 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
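# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It goes through
# the public `transformers` entry point rather than this file directly, and it
# needs network access to fetch the pretrained vocabulary.
def _usage_example():
    from transformers import BigBirdTokenizerFast

    tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
    enc = tok("Paris is the capital of France.", return_token_type_ids=True)
    print(enc["input_ids"])
    print(enc["token_type_ids"])  # all zeros for a single sequence, as built above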
| 662 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : List[Any] = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
a = parser.parse_args()
return args
def tokenize_function(tokenizer):
    """Build a closure that tokenizes the `text` column of a batch of examples."""
    def fn(examples):
        return tokenizer(examples["text"] )
    return fn
def get_serialized_examples(tokenized_data):
    """Serialize every tokenized sample into a tf.train.Example byte string."""
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
a = min(len(A ), args.limit )
a = dataset.select(range(A ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a = os.path.join(args.output_dir, args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
a = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
a = tokenize_function(A )
a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A : List[Any] ):
# Concatenate all texts.
a = {k: sum(examples[k], [] ) for k in examples.keys()}
a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a = {
k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 )
a = 0
a = 0
for shard in range(0, len(A ), args.shard_size ):
a = grouped_dataset[shard : shard + args.shard_size]
a = len(dataset_snapshot["input_ids"] )
a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
a = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
a = serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A, A ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
print(F"""Total {args.split} records: {total_records}""", file=A )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = parse_args()
main(args)
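# ---------------------------------------------------------------------------
# Illustrative reader sketch (not part of the original script): how the shards
# written above can be parsed back with tf.data. The glob pattern assumes the
# default --output_dir and --split values.
def load_shards_example():
    feature_spec = {
        "input_ids": tf.io.VarLenFeature(tf.int64),
        "attention_mask": tf.io.VarLenFeature(tf.int64),
    }

    def decode(record_bytes):
        parsed = tf.io.parse_single_example(record_bytes, feature_spec)
        return {name: tf.sparse.to_dense(tensor) for name, tensor in parsed.items()}

    files = tf.io.gfile.glob("tf-tpu/train/dataset-*.tfrecord")
    return tf.data.TFRecordDataset(files).map(decode)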
| 662 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the sorted-letter signature shared by all anagrams of `word`."""
    return "".join(sorted(word ) )
def anagram(my_word: str) -> list[str]:
    """Return every word from the word list that is an anagram of `my_word`."""
    return word_by_signature[signature(my_word )]
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
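    # Illustrative lookup (not in the original file): any word can be queried for
    # its anagram group; the result depends on the contents of words.txt.
    print(anagram("pots"))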
| 662 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one sample and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
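# Illustrative check (not part of the original script): average number of
# characters per token produced by the tokenizer over the processed split.
print(f"Average chars per token: {sum(ds['ratio_char_token']) / len(ds):.2f}")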
| 662 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = XLMProphetNetTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : List[str] ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
a = XLMProphetNetTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
a = "[PAD]"
a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__lowerCamelCase ) , 10_12 )
def __UpperCAmelCase ( self : Any ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_12 )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
a = XLMProphetNetTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
a = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
a = "Hello World!"
a = [3_53_89, 66_72, 49, 2]
self.assertListEqual(__lowerCamelCase , self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
# fmt: off
a = {"input_ids": [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 662 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]:
a = tokenizer
a = dataset
a = len(__lowerCamelCase ) if n_tasks is None else n_tasks
a = n_copies
def __iter__( self : Tuple ) -> str:
a = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]:
a = start_length
a = eof_strings
a = tokenizer
def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__lowerCamelCase )
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
a = re.split("(%s)" % "|".join(A ), A )
# last string should be ""
return "".join(string_list[:-2] )
def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ):
'''simple docstring'''
a = defaultdict(A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(A ) ):
with torch.no_grad():
a = batch["ids"].shape[-1]
a = accelerator.unwrap_model(A ).generate(
input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A )
# each task is generated batch_size times
a = batch["task_id"].repeat(A )
a = accelerator.pad_across_processes(
A, dim=1, pad_index=tokenizer.pad_token_id )
a , a = accelerator.gather((generated_tokens, generated_tasks) )
a = generated_tokens.cpu().numpy()
a = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(A, A ):
gen_token_dict[task].append(A )
a = [[] for _ in range(A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A )
code_gens[task].append(remove_last_block(A ) )
return code_gens
def __magic_name__ ( ):
'''simple docstring'''
a = HfArgumentParser(A )
a = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a = "false"
if args.num_workers is None:
a = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a = Accelerator()
set_seed(args.seed, device_specific=A )
# Load model and tokenizer
a = AutoTokenizer.from_pretrained(args.model_ckpt )
a = tokenizer.eos_token
a = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ),
}
# Load evaluation dataset and metric
a = load_dataset("openai_humaneval" )
a = load_metric("code_eval" )
a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
a = args.n_samples // args.batch_size
a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a = DataLoader(A, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a = code_eval_metric.compute(references=[""], predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
a , a = accelerator.prepare(A, A )
a = complete_code(
A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, )
if accelerator.is_main_process:
a = []
for task in tqdm(range(A ) ):
a = human_eval["test"][task]["test"]
a = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
a , a = code_eval_metric.compute(
references=A, predictions=A, num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file, "w" ) as fp:
json.dump(A, A )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 662 | 1 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Return the minimum count of perfect squares that sum to `number` (dynamic programming)."""
    if number != int(number ):
        raise ValueError("the value of input must be a natural number" )
    if number < 0:
        raise ValueError("the value of input must not be a negative number" )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1, root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
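    # Illustrative values (not in the original file); by Lagrange's four-square
    # theorem the result never exceeds 4.
    for n in (12, 25, 31):
        print(n, minimum_squares_to_represent_a_number(n))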
| 662 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : List[str] = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = """decision_transformer"""
SCREAMING_SNAKE_CASE_ : Any = ["""past_key_values"""]
SCREAMING_SNAKE_CASE_ : Any = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : str , __lowerCamelCase : Optional[int]=17 , __lowerCamelCase : int=4 , __lowerCamelCase : Union[str, Any]=1_28 , __lowerCamelCase : Tuple=40_96 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : Union[str, Any]=10_24 , __lowerCamelCase : int=3 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[Any]="relu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : str=1e-5 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=5_02_56 , __lowerCamelCase : Union[str, Any]=5_02_56 , __lowerCamelCase : Any=False , __lowerCamelCase : Any=False , **__lowerCamelCase : str , ) -> Tuple:
a = state_dim
a = act_dim
a = hidden_size
a = max_ep_len
a = action_tanh
a = vocab_size
a = n_positions
a = n_layer
a = n_head
a = n_inner
a = activation_function
a = resid_pdrop
a = embd_pdrop
a = attn_pdrop
a = layer_norm_epsilon
a = initializer_range
a = scale_attn_weights
a = use_cache
a = scale_attn_by_inverse_layer_idx
a = reorder_and_upcast_attn
a = bos_token_id
a = eos_token_id
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
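# ---------------------------------------------------------------------------
# Illustrative instantiation sketch (not part of the original module), using the
# public `transformers` classes that this configuration mirrors; constructing
# the model requires torch.
def _usage_example():
    from transformers import DecisionTransformerConfig, DecisionTransformerModel

    config = DecisionTransformerConfig(state_dim=17, act_dim=6, hidden_size=128)
    model = DecisionTransformerModel(config)
    print(f"parameters: {sum(p.numel() for p in model.parameters()):,}")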
| 662 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 1 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__lowerCAmelCase : str = datasets.utils.logging.get_logger(__name__)
class snake_case__ (folder_based_builder.FolderBasedBuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : bool = None
SCREAMING_SNAKE_CASE_ : bool = None
class snake_case__ (folder_based_builder.FolderBasedBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = datasets.Audio()
SCREAMING_SNAKE_CASE_ : Any = """audio"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AudioFolderConfig
SCREAMING_SNAKE_CASE_ : List[str] # definition at the bottom of the script
SCREAMING_SNAKE_CASE_ : str = AudioClassification(audio_column="""audio""" , label_column="""label""" )
__lowerCAmelCase : Dict = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
__lowerCAmelCase : List[Any] = AUDIO_EXTENSIONS
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__lowerCAmelCase : int = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 662 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase : List[str] = 'true'
def __magic_name__ ( A : Dict, A : Tuple=82, A : List[Any]=16 ):
'''simple docstring'''
set_seed(42 )
a = RegressionModel()
a = deepcopy(A )
a = RegressionDataset(length=A )
a = DataLoader(A, batch_size=A )
model.to(accelerator.device )
a , a = accelerator.prepare(A, A )
return model, ddp_model, dataloader
def __magic_name__ ( A : Accelerator, A : Union[str, Any]=False ):
'''simple docstring'''
a = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
a = load_dataset("glue", "mrpc", split="validation" )
def tokenize_function(A : Tuple ):
a = tokenizer(examples["sentence1"], examples["sentence2"], truncation=A, max_length=A )
return outputs
with accelerator.main_process_first():
a = dataset.map(
A, batched=A, remove_columns=["idx", "sentence1", "sentence2"], )
a = tokenized_datasets.rename_column("label", "labels" )
def collate_fn(A : List[Any] ):
if use_longest:
return tokenizer.pad(A, padding="longest", return_tensors="pt" )
return tokenizer.pad(A, padding="max_length", max_length=128, return_tensors="pt" )
return DataLoader(A, shuffle=A, collate_fn=A, batch_size=16 )
def __magic_name__ ( A : Dict, A : str ):
'''simple docstring'''
a = Accelerator(dispatch_batches=A, split_batches=A )
a = get_dataloader(A, not dispatch_batches )
a = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased", return_dict=A )
a , a = accelerator.prepare(A, A )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __magic_name__ ( A : List[Any], A : Optional[Any], A : str ):
'''simple docstring'''
a = []
for batch in dataloader:
a , a = batch.values()
with torch.no_grad():
a = model(A )
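# gather_for_metrics drops the samples that were duplicated to pad the last uneven batch across processes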
a , a = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a , a = [], []
for logit, targ in logits_and_targets:
logits.append(A )
targs.append(A )
a , a = torch.cat(A ), torch.cat(A )
return logits, targs
def __magic_name__ ( A : Accelerator, A : Dict=82, A : Any=False, A : List[str]=False, A : List[str]=16 ):
'''simple docstring'''
a , a , a = get_basic_setup(A, A, A )
a , a = generate_predictions(A, A, A )
assert (
len(A ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(A )}"""
def __magic_name__ ( A : bool = False, A : bool = False ):
'''simple docstring'''
a = evaluate.load("glue", "mrpc" )
a , a = get_mrpc_setup(A, A )
# First do baseline
a , a , a = setup["no"]
model.to(A )
model.eval()
for batch in dataloader:
batch.to(A )
with torch.inference_mode():
a = model(**A )
a = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=A, references=batch["labels"] )
a = metric.compute()
# Then do distributed
a , a , a = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
a = model(**A )
a = outputs.logits.argmax(dim=-1 )
a = batch["labels"]
a , a = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=A, references=A )
a = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key], distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def __magic_name__ ( ):
'''simple docstring'''
a = Accelerator(split_batches=A, dispatch_batches=A )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(A, A )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
a = Accelerator(split_batches=A, dispatch_batches=A )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(A, 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
a = Accelerator()
test_torch_metrics(A, 512 )
accelerator.state._reset_state()
def __magic_name__ ( A : Dict ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : Dict = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : Union[str, "sqlalchemy.sql.Selectable"] , __lowerCamelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , **__lowerCamelCase : int , ) -> Optional[Any]:
super().__init__(features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , **__lowerCamelCase )
a = Sql(
cache_dir=__lowerCamelCase , features=__lowerCamelCase , sql=__lowerCamelCase , con=__lowerCamelCase , **__lowerCamelCase , )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
a = None
a = None
a = None
a = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , )
# Build dataset for splits
a = self.builder.as_dataset(
split="train" , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
class snake_case__ :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : Dataset , __lowerCamelCase : str , __lowerCamelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : List[Any] , ) -> Union[str, Any]:
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
a = dataset
a = name
a = con
a = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
a = num_proc
a = to_sql_kwargs
def __UpperCAmelCase ( self : str ) -> int:
a = self.to_sql_kwargs.pop("sql" , __lowerCamelCase )
a = self.to_sql_kwargs.pop("con" , __lowerCamelCase )
a = self.to_sql_kwargs.pop("index" , __lowerCamelCase )
a = self._write(index=__lowerCamelCase , **self.to_sql_kwargs )
return written
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple ) -> Optional[int]:
a , a , a = args
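# every batch after the first is appended to the table created by the first batch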
a = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
a = query_table(
table=self.dataset.data , key=slice(__lowerCamelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
a = batch.to_pandas()
a = df.to_sql(self.name , self.con , index=__lowerCamelCase , **__lowerCamelCase )
return num_rows or len(__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int , **__lowerCamelCase : int ) -> int:
a = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
a , a = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __lowerCamelCase , __lowerCamelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
| 662 |
import math
import flax.linen as nn
import jax.numpy as jnp
def __magic_name__ ( A : jnp.ndarray, A : int, A : float = 1, A : float = 1, A : float = 1.0E4, A : bool = False, A : float = 1.0, ):
'''simple docstring'''
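# transformer-style sinusoidal embeddings: sine and cosine features over geometrically spaced frequencies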
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
a = float(embedding_dim // 2 )
a = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
a = min_timescale * jnp.exp(jnp.arange(A, dtype=jnp.floataa ) * -log_timescale_increment )
a = jnp.expand_dims(A, 1 ) * jnp.expand_dims(A, 0 )
# scale embeddings
a = scale * emb
if flip_sin_to_cos:
a = jnp.concatenate([jnp.cos(A ), jnp.sin(A )], axis=1 )
else:
a = jnp.concatenate([jnp.sin(A ), jnp.cos(A )], axis=1 )
a = jnp.reshape(A, [jnp.shape(A )[0], embedding_dim] )
return signal
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCamelCase )
a = nn.silu(__lowerCamelCase )
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCamelCase )
return temb
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : float = 1
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]:
return get_sinusoidal_embeddings(
__lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Any = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
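# build a list with one random RGB image wrapped as a PIL image for the processor tests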
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : str = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0]
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
| 662 | 1 |
from statistics import mean
import numpy as np
def __magic_name__ ( A : list, A : list, A : list, A : int ):
'''simple docstring'''
a = 0
# Number of processes finished
a = 0
# Marks whether each process has finished:
# 0 means not yet executed, 1 means finished.
a = [0] * no_of_process
# List to include calculation results
a = [0] * no_of_process
# Sort by arrival time.
a = [burst_time[i] for i in np.argsort(A )]
a = [process_name[i] for i in np.argsort(A )]
arrival_time.sort()
while no_of_process > finished_process_count:
a = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
a = arrival_time[i]
a = 0
# Index showing the location of the process being performed
a = 0
# Saves the current response ratio.
a = 0
for i in range(0, A ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
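# Highest Response Ratio Next: response ratio = (waiting time + burst time) / burst time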
a = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
a = temp
a = i
# Calculate the turn around time
a = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
a = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def __magic_name__ ( A : list, A : list, A : list, A : int ):
'''simple docstring'''
a = [0] * no_of_process
for i in range(0, A ):
a = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
__lowerCAmelCase : str = 5
__lowerCAmelCase : int = ['A', 'B', 'C', 'D', 'E']
__lowerCAmelCase : Union[str, Any] = [1, 2, 3, 4, 5]
__lowerCAmelCase : Optional[Any] = [1, 2, 3, 4, 5]
__lowerCAmelCase : Union[str, Any] = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__lowerCAmelCase : Any = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 662 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
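# the result is printed as a single space-separated list, e.g. "src/foo.py tests/test_foo.py" (hypothetical paths)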
import re
import subprocess
import sys
__lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
__lowerCAmelCase : Dict = '|'.join(sys.argv[1:])
__lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 662 | 1 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def __magic_name__ ( A : Optional[int] ):
'''simple docstring'''
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set() )
@pytest.fixture
def __magic_name__ ( A : str ):
'''simple docstring'''
class snake_case__ :
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : Tuple ) -> Any:
a = metric_id
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = [MetricMock(_UpperCamelCase ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def __UpperCAmelCase ( self : int ) -> Tuple:
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock() )
@pytest.mark.parametrize(
"func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def __magic_name__ ( A : Optional[Any], A : Optional[int], A : Any, A : Tuple, A : List[str] ):
'''simple docstring'''
if "tmp_path" in args:
a = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(A, match="https://huggingface.co/docs/evaluate" ):
func(*A )
| 662 |
def __magic_name__ ( A : int, A : int, A : int ):
'''simple docstring'''
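# modular exponentiation by repeated squaring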
if exponent == 1:
return base
if exponent % 2 == 0:
a = _modexpt(A, exponent // 2, A ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A, exponent - 1, A )) % modulo_value
def __magic_name__ ( A : int = 1777, A : int = 1855, A : int = 8 ):
'''simple docstring'''
a = base
for _ in range(1, A ):
a = _modexpt(A, A, 10**digits )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 662 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = '▁'
__lowerCAmelCase : Union[str, Any] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
__lowerCAmelCase : Dict = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
__lowerCAmelCase : List[str] = {
'facebook/s2t-small-librispeech-asr': 1024,
}
__lowerCAmelCase : List[Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
__lowerCAmelCase : Tuple = {'mustc': MUSTC_LANGS}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[str] = MAX_MODEL_INPUT_SIZES
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : Any="<pad>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int=False , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : int=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : List[Any] , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_upper_case=__lowerCamelCase , do_lower_case=__lowerCamelCase , tgt_lang=__lowerCamelCase , lang_codes=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = do_upper_case
a = do_lower_case
a = load_json(__lowerCamelCase )
a = {v: k for k, v in self.encoder.items()}
a = spm_file
a = load_spm(__lowerCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
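# multilingual checkpoints register one <lang:xx> token per language and prefix the target-language id to every sequence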
a = lang_codes
a = LANGUAGES[lang_codes]
a = [f"""<lang:{lang}>""" for lang in self.langs]
a = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs}
a = self.lang_tokens
a = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
a = {}
@property
def __UpperCAmelCase ( self : List[str] ) -> int:
return len(self.encoder )
@property
def __UpperCAmelCase ( self : Dict ) -> str:
return self._tgt_lang
@tgt_lang.setter
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, Any] ) -> None:
a = new_tgt_lang
self.set_tgt_lang_special_tokens(__lowerCamelCase )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str ) -> None:
a = self.lang_code_to_id[tgt_lang]
a = [lang_code_id]
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Tuple ) -> List[Any]:
return self.encoder.get(__lowerCamelCase , self.encoder[self.unk_token] )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : int ) -> str:
return self.decoder.get(__lowerCamelCase , self.unk_token )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[str] ) -> str:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
a = self.sp_model.decode(__lowerCamelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
a = self.sp_model.decode(__lowerCamelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
a = [1] * len(self.prefix_tokens )
a = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(__lowerCamelCase )) + ([0] * len(__lowerCamelCase )) + suffix_ones
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : int ) -> Dict:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : Union[str, Any] , __lowerCamelCase : Dict ) -> None:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
a = Path(__lowerCamelCase )
assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , __lowerCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __lowerCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (str(__lowerCamelCase ), str(__lowerCamelCase ))
def __magic_name__ ( A : str, A : Dict[str, Any] ):
'''simple docstring'''
a = sentencepiece.SentencePieceProcessor(**A )
spm.Load(str(A ) )
return spm
def __magic_name__ ( A : str ):
'''simple docstring'''
with open(A, "r" ) as f:
return json.load(A )
def __magic_name__ ( A : Dict, A : str ):
'''simple docstring'''
with open(A, "w" ) as f:
json.dump(A, A, indent=2 )
| 662 |
def __magic_name__ ( A : str, A : str ):
'''simple docstring'''
def get_matched_characters(A : str, A : str ) -> str:
a = []
a = min(len(_stra ), len(_stra ) ) // 2
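# two characters can only match if they are equal and no more than this many positions apart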
for i, l in enumerate(_stra ):
a = int(max(0, i - limit ) )
a = int(min(i + limit + 1, len(_stra ) ) )
if l in _stra[left:right]:
matched.append(A )
a = F"""{_stra[0:_stra.index(A )]} {_stra[_stra.index(A ) + 1:]}"""
return "".join(A )
# matching characters
a = get_matched_characters(A, A )
a = get_matched_characters(A, A )
a = len(A )
# transposition
a = (
len([(ca, ca) for ca, ca in zip(A, A ) if ca != ca] ) // 2
)
if not match_count:
a = 0.0
else:
a = (
1
/ 3
* (
match_count / len(A )
+ match_count / len(A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a = 0
for ca, ca in zip(stra[:4], stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 662 | 1 |
from ..utils import DummyObject, requires_backends
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Optional[Any] , *__lowerCamelCase : int , **__lowerCamelCase : Any ) -> Optional[Any]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *__lowerCamelCase : Tuple , **__lowerCamelCase : str ) -> List[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *__lowerCamelCase : Any , **__lowerCamelCase : Any ) -> List[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : int ) -> Dict:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Any , *__lowerCamelCase : str , **__lowerCamelCase : int ) -> str:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Any , *__lowerCamelCase : List[str] , **__lowerCamelCase : Tuple ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Any , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : str ) -> Tuple:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : List[str] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[Any] ) -> Tuple:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : str , *__lowerCamelCase : Any , **__lowerCamelCase : List[str] ) -> int:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["""torch""", """transformers""", """onnx"""]
def __init__( self : int , *__lowerCamelCase : Any , **__lowerCamelCase : Any ) -> List[str]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Any , *__lowerCamelCase : int , **__lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : int , *__lowerCamelCase : int , **__lowerCamelCase : Union[str, Any] ) -> List[str]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Union[str, Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : List[str] ) -> Optional[int]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[Any] ) -> List[str]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : List[str] , *__lowerCamelCase : int , **__lowerCamelCase : Tuple ) -> Dict:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Optional[int] , *__lowerCamelCase : Dict , **__lowerCamelCase : List[Any] ) -> Any:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Any ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
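# The classes above are import-time placeholders: instantiating any of them is
# expected to fail with a message naming the missing backends. A minimal,
# hypothetical sketch of such a guard (the real `requires_backends` helper is
# more elaborate) could look like:
import importlib.util
def _requires_backends_sketch(obj, backends):
    missing = [name for name in backends if importlib.util.find_spec(name) is None]
    if missing:
        owner = getattr(obj, "__name__", type(obj).__name__)
        raise ImportError(f"{owner} requires the following backends: {', '.join(missing)}")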
| 662 |
__lowerCAmelCase : List[Any] = {str(digit): digit**5 for digit in range(10)}
def __magic_name__ ( A : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
def __magic_name__ ( ):
'''simple docstring'''
return sum(
number
for number in range(1000, 1000000 )
if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
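# A quick independent check of the property the search above relies on, using a
# known example: 4150 equals the sum of the fifth powers of its own digits
# (4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150).
assert sum(int(digit) ** 5 for digit in "4150") == 4150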
| 662 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __magic_name__ ( A : int, A : int, A : float = 1 / sqrt(2 ) ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = (1 - _cos) / 2
a = 1 - _cos
a = 1 + alpha
a = -2 * _cos
a = 1 - alpha
a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __magic_name__ ( A : int, A : int, A : float = 1 / sqrt(2 ) ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = (1 + _cos) / 2
a = -1 - _cos
a = 1 + alpha
a = -2 * _cos
a = 1 - alpha
a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __magic_name__ ( A : int, A : int, A : float = 1 / sqrt(2 ) ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = _sin / 2
a = 0
a = -ba
a = 1 + alpha
a = -2 * _cos
a = 1 - alpha
a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __magic_name__ ( A : int, A : int, A : float = 1 / sqrt(2 ) ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = 1 - alpha
a = -2 * _cos
a = 1 + alpha
a = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba], [ba, ba, ba] )
return filt
def __magic_name__ ( A : int, A : int, A : float, A : float = 1 / sqrt(2 ), ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = 10 ** (gain_db / 40)
a = 1 + alpha * big_a
a = -2 * _cos
a = 1 - alpha * big_a
a = 1 + alpha / big_a
a = -2 * _cos
a = 1 - alpha / big_a
a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __magic_name__ ( A : int, A : int, A : float, A : float = 1 / sqrt(2 ), ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = 10 ** (gain_db / 40)
a = (big_a + 1) - (big_a - 1) * _cos
a = (big_a + 1) + (big_a - 1) * _cos
a = (big_a - 1) - (big_a + 1) * _cos
a = (big_a - 1) + (big_a + 1) * _cos
a = 2 * sqrt(A ) * alpha
a = big_a * (pmc + aaa)
a = 2 * big_a * mpc
a = big_a * (pmc - aaa)
a = ppmc + aaa
a = -2 * pmpc
a = ppmc - aaa
a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __magic_name__ ( A : int, A : int, A : float, A : float = 1 / sqrt(2 ), ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = 10 ** (gain_db / 40)
a = (big_a + 1) - (big_a - 1) * _cos
a = (big_a + 1) + (big_a - 1) * _cos
a = (big_a - 1) - (big_a + 1) * _cos
a = (big_a - 1) + (big_a + 1) * _cos
a = 2 * sqrt(A ) * alpha
a = big_a * (ppmc + aaa)
a = -2 * big_a * pmpc
a = big_a * (ppmc - aaa)
a = pmc + aaa
a = 2 * mpc
a = pmc - aaa
a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
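# A self-contained sketch of applying the second-order (biquad) low-pass design
# from the first factory above: the same coefficients are computed for a cutoff
# frequency and fed through the direct-form difference equation
# y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]) / a0.
# The helper name, test signal and frequencies below are illustrative only.
def _lowpass_demo(samples, frequency=1_000, samplerate=48_000, q_factor=1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    _sin, _cos = sin(w0), cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = b2 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0, a1, a2 = 1 + alpha, -2 * _cos, 1 - alpha
    x1 = x2 = y1 = y2 = 0.0
    filtered = []
    for x0 in samples:
        y0 = (b0 * x0 + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2) / a0
        x1, x2, y1, y2 = x0, x1, y0, y1
        filtered.append(y0)
    return filtered
print(_lowpass_demo([0.0, 1.0, 0.0, -1.0, 0.0, 1.0, 0.0, -1.0]))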
| 662 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
a = do_rescale
a = rescale_factor
a = do_pad
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]:
if not batched:
a = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size["shortest_edge"] * h / w )
a = self.size["shortest_edge"]
elif w > h:
a = self.size["shortest_edge"]
a = int(self.size["shortest_edge"] * w / h )
else:
a = self.size["shortest_edge"]
a = self.size["shortest_edge"]
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
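# A worked instance of the shortest-edge resize rule that get_expected_values
# above encodes (the numbers are arbitrary): with shortest_edge = 18 and an
# input of height 30 x width 40, the shorter side becomes 18 and the longer
# side is rescaled proportionally to int(18 * 40 / 30) = 24.
shortest_edge, h, w = 18, 30, 40
if w < h:
    expected_h, expected_w = int(shortest_edge * h / w), shortest_edge
elif w > h:
    expected_h, expected_w = shortest_edge, int(shortest_edge * w / h)
else:
    expected_h = expected_w = shortest_edge
print(expected_h, expected_w)  # 18 24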
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : str = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict = ['CLIPFeatureExtractor']
__lowerCAmelCase : List[Any] = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
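# The module above defers the heavy imports through _LazyModule. A rough,
# hypothetical sketch of that idea (the real _LazyModule in transformers does
# considerably more, e.g. error reporting and __dir__ support):
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attribute):
        for submodule, names in self._import_structure.items():
            if attribute in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attribute)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attribute!r}")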
| 662 |
def __magic_name__ ( A : list ):
'''simple docstring'''
for i in range(len(A ) - 1, 0, -1 ):
a = False
for j in range(A, 0, -1 ):
if unsorted[j] < unsorted[j - 1]:
a , a = unsorted[j - 1], unsorted[j]
a = True
for j in range(A ):
if unsorted[j] > unsorted[j + 1]:
a , a = unsorted[j + 1], unsorted[j]
a = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 662 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
__lowerCAmelCase : Dict = '|'.join(sys.argv[1:])
__lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
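# A tiny check of the path filter this script builds (sample strings are made
# up): with sub-dirs "utils" and "src" passed on the command line, the compiled
# pattern keeps only Python files under those roots.
import re as _re
_sample_pattern = _re.compile(r"^(utils|src).*?\.py$")
print(bool(_sample_pattern.match("src/transformers/models/foo.py")))  # True
print(bool(_sample_pattern.match("docs/readme.md")))                  # False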
| 662 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase )
}
| 662 | 1 |
def __magic_name__ ( A : list[int], A : int ):
'''simple docstring'''
a = len(A )
a = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for every prefix of arr, a sum of zero can be formed by taking no elements,
# hence True
for i in range(arr_len + 1 ):
a = True
# a non-zero sum cannot be formed from an empty set, hence False
for i in range(1, required_sum + 1 ):
a = False
for i in range(1, arr_len + 1 ):
for j in range(1, required_sum + 1 ):
if arr[i - 1] > j:
a = subset[i - 1][j]
if arr[i - 1] <= j:
a = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
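# A brute-force cross-check of the DP above, handy when sanity-testing small
# inputs (the helper name and sample values are illustrative): with
# arr = [3, 4, 5], a sum of 9 is reachable (4 + 5) while 11 is not.
from itertools import combinations
def _subset_sum_brute_force(arr, required_sum):
    return any(
        sum(combo) == required_sum
        for r in range(len(arr) + 1)
        for combo in combinations(arr, r)
    )
print(_subset_sum_brute_force([3, 4, 5], 9))   # True
print(_subset_sum_brute_force([3, 4, 5], 11))  # False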
| 662 |
import argparse
import os
import re
__lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __magic_name__ ( A : int, A : bool = False ):
'''simple docstring'''
with open(A, "r", encoding="utf-8" ) as f:
a = f.read()
a = content.split("\n" )
a = []
a = 0
while line_idx < len(A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(A, "w", encoding="utf-8" ) as f:
f.write("\n".join(A ) )
elif "\n".join(A ) != content:
return True
def __magic_name__ ( A : bool = False ):
'''simple docstring'''
a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )]
a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames]
if not overwrite and any(A ):
a = [f for f, d in zip(A, A ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
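# Quick illustration of the two regexes defined at the top of this script
# (the sample strings are made up):
print(bool(_re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict(")))  # True
print(_re_identifier.search('        ("albert", "AlbertConfig"),').groups()[0])  # albert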
| 662 | 1 |
import itertools
import math
def __magic_name__ ( A : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(A ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __magic_name__ ( ):
'''simple docstring'''
a = 2
while True:
if is_prime(A ):
yield num
num += 1
def __magic_name__ ( A : int = 10001 ):
'''simple docstring'''
return next(itertools.islice(prime_generator(), nth - 1, A ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
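# An independent reference for small cases, useful for sanity-checking the
# generator above (helper name is illustrative; plain trial division, no
# 6k +/- 1 shortcut): the 6th prime is 13 and the 10th is 29.
def _nth_prime_by_trial_division(n: int) -> int:
    count, candidate = 0, 1
    while count < n:
        candidate += 1
        if all(candidate % divisor for divisor in range(2, int(candidate**0.5) + 1)):
            count += 1
    return candidate
assert _nth_prime_by_trial_division(6) == 13
assert _nth_prime_by_trial_division(10) == 29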
| 662 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 662 | 1 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : Optional[int]=7 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str=99 , __lowerCamelCase : Tuple=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[str]=37 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : List[Any]=5_12 , __lowerCamelCase : int=16 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : int=3 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=None , ) -> Any:
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : str ) -> Optional[Any]:
a = LlamaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
a = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Dict , ) -> Tuple:
a = True
a = LlamaModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , )
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , ) -> str:
a = LlamaForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , ) -> Optional[Any]:
a = True
a = True
a = LlamaForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
# first forward pass
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase , )
a = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
a = ids_tensor((self.batch_size, 3) , config.vocab_size )
a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
a = torch.cat([input_ids, next_tokens] , dim=-1 )
a = torch.cat([input_mask, next_mask] , dim=-1 )
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )["hidden_states"][0]
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )["hidden_states"][0]
# select random slice
a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a = output_from_no_past[:, -3:, random_slice_idx].detach()
a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
a = self.prepare_config_and_inputs()
a , a , a , a , a , a , a = config_and_inputs
a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case__ (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (LlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[Any] = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
a = LlamaModelTester(self )
a = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> str:
a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(__lowerCamelCase )
a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a = LlamaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = "single_label_classification"
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(__lowerCamelCase )
a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a = LlamaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = "multi_label_classification"
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(__lowerCamelCase )
a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
a = LlamaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def __UpperCAmelCase ( self : Dict ) -> Any:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Tuple ) -> Union[str, Any]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = ids_tensor([1, 10] , config.vocab_size )
a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a = LlamaModel(__lowerCamelCase )
original_model.to(__lowerCamelCase )
original_model.eval()
a = original_model(__lowerCamelCase ).last_hidden_state
a = original_model(__lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a = {"type": scaling_type, "factor": 10.0}
a = LlamaModel(__lowerCamelCase )
scaled_model.to(__lowerCamelCase )
scaled_model.eval()
a = scaled_model(__lowerCamelCase ).last_hidden_state
a = scaled_model(__lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) )
@require_torch
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!" )
@slow
def __UpperCAmelCase ( self : Dict ) -> int:
a = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
a = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
a = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __lowerCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!" )
@slow
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
a = model(torch.tensor(__lowerCamelCase ) )
# Expected mean on dim = -1
a = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __lowerCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!" )
@slow
def __UpperCAmelCase ( self : Any ) -> Tuple:
a = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
a = model(torch.tensor(__lowerCamelCase ) )
# Expected mean on dim = -1
a = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
a = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
a = model(torch.tensor(__lowerCamelCase ) )
a = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
a = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __lowerCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Model is currently gated" )
@slow
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
a = "Simply put, the theory of relativity states that "
a = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
a = tokenizer.encode(__lowerCamelCase , return_tensors="pt" )
a = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=__lowerCamelCase )
# greedy generation outputs
a = model.generate(__lowerCamelCase , max_new_tokens=64 , top_p=__lowerCamelCase , temperature=1 , do_sample=__lowerCamelCase )
a = tokenizer.decode(generated_ids[0] , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
| 662 |
from __future__ import annotations
import time
import numpy as np
__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
return {self.__need().index(i ): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
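# A hand computation matching the first two helper methods of the class above,
# using the sample data at the top of this snippet (plain Python, no numpy;
# the variable names are illustrative): per-resource usage is the column-wise
# sum of the allocation table, and the initially available resources are the
# claim vector minus that usage.
sample_claim_vector = [8, 5, 9, 7]
sample_allocated = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
usage = [sum(row[i] for row in sample_allocated) for i in range(len(sample_claim_vector))]
available = [claim - used for claim, used in zip(sample_claim_vector, usage)]
print(usage)      # [7, 3, 7, 5]
print(available)  # [1, 2, 2, 2]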
| 662 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : str = "cpu" , __lowerCamelCase : str = "openai/clip-vit-large-patch14" ) -> None:
a = device
a = CLIPTokenizerFast.from_pretrained(__lowerCamelCase )
a = [0.48_145_466, 0.4_578_275, 0.40_821_073]
a = [0.26_862_954, 0.26_130_258, 0.27_577_711]
a = torchvision.transforms.Normalize(self.image_mean , self.image_std )
a = torchvision.transforms.Resize(2_24 )
a = torchvision.transforms.CenterCrop(2_24 )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Tuple ) -> List[Any]:
a = self.resize(__lowerCamelCase )
a = self.center_crop(__lowerCamelCase )
a = self.normalize(__lowerCamelCase )
return images
def __call__( self : Tuple , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Tuple=None , **__lowerCamelCase : List[str] ) -> Optional[int]:
a = self.tokenizer(text=__lowerCamelCase , **__lowerCamelCase )
a = self.preprocess_img(__lowerCamelCase )
a = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict=10 , __lowerCamelCase : int=0.01 , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Any=None , __lowerCamelCase : str=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=None , __lowerCamelCase : Dict=False , __lowerCamelCase : int=True , __lowerCamelCase : List[Any]="image" , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : Optional[int]=False , ) -> None:
super().__init__()
a = None
a = device if device else get_device()
if vqgan:
a = vqgan
else:
a = load_vqgan(self.device , conf_path=__lowerCamelCase , ckpt_path=__lowerCamelCase )
self.vqgan.eval()
if clip:
a = clip
else:
a = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" )
self.clip.to(self.device )
a = ProcessorGradientFlow(device=self.device )
a = iterations
a = lr
a = log
a = make_grid
a = return_val
a = quantize
a = self.vqgan.decoder.z_shape
def __UpperCAmelCase ( self : Any , __lowerCamelCase : int=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str=5 , __lowerCamelCase : Dict=True ) -> Optional[int]:
a = []
if output_path is None:
a = "./animation.gif"
if input_path is None:
a = self.save_path
a = sorted(glob(input_path + "/*" ) )
if not len(__lowerCamelCase ):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)" )
if len(__lowerCamelCase ) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
a = total_duration / len(__lowerCamelCase )
a = [frame_duration] * len(__lowerCamelCase )
if extend_frames:
a = 1.5
a = 3
for file_name in paths:
if file_name.endswith(".png" ):
images.append(imageio.imread(__lowerCamelCase ) )
imageio.mimsave(__lowerCamelCase , __lowerCamelCase , duration=__lowerCamelCase )
print(f"""gif saved to {output_path}""" )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=None ) -> int:
if not (path or img):
raise ValueError("Input either path or tensor" )
if img is not None:
raise NotImplementedError
a = preprocess(Image.open(__lowerCamelCase ) , target_image_size=2_56 ).to(self.device )
a = preprocess_vqgan(__lowerCamelCase )
a , *a = self.vqgan.encode(__lowerCamelCase )
return z
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = self.latent.detach().requires_grad_()
a = base_latent + transform_vector
if self.quantize:
a , *a = self.vqgan.quantize(__lowerCamelCase )
else:
a = trans_latent
return self.vqgan.decode(__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any]=None ) -> List[str]:
a = self.clip_preprocessor(text=__lowerCamelCase , images=__lowerCamelCase , return_tensors="pt" , padding=__lowerCamelCase )
a = self.clip(**__lowerCamelCase )
a = clip_outputs.logits_per_image
if weights is not None:
a = similarity_logits * weights
return similarity_logits.sum()
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ) -> Tuple:
a = self._get_clip_similarity(pos_prompts["prompts"] , __lowerCamelCase , weights=(1 / pos_prompts["weights"]) )
if neg_prompts:
a = self._get_clip_similarity(neg_prompts["prompts"] , __lowerCamelCase , weights=neg_prompts["weights"] )
else:
a = torch.tensor([1] , device=self.device )
a = -torch.log(__lowerCamelCase ) + torch.log(__lowerCamelCase )
return loss
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> List[str]:
a = torch.randn_like(self.latent , requires_grad=__lowerCamelCase , device=self.device )
a = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
a = self._add_vector(__lowerCamelCase )
a = loop_post_process(__lowerCamelCase )
a = self._get_CLIP_loss(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
print("CLIP loss" , __lowerCamelCase )
if self.log:
wandb.log({"CLIP Loss": clip_loss} )
clip_loss.backward(retain_graph=__lowerCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
wandb.init(reinit=__lowerCamelCase , project="face-editor" )
wandb.config.update({"Positive Prompts": positive_prompts} )
wandb.config.update({"Negative Prompts": negative_prompts} )
wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
if image_path:
a = Image.open(__lowerCamelCase )
a = image.resize((2_56, 2_56) )
wandb.log("Original Image" , wandb.Image(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[Any] ) -> str:
if not prompts:
return []
a = []
a = []
if isinstance(__lowerCamelCase , __lowerCamelCase ):
a = [prompt.strip() for prompt in prompts.split("|" )]
for prompt in prompts:
if isinstance(__lowerCamelCase , (tuple, list) ):
a = prompt[0]
a = float(prompt[1] )
elif ":" in prompt:
a , a = prompt.split(":" )
a = float(__lowerCamelCase )
else:
a = prompt
a = 1.0
processed_prompts.append(__lowerCamelCase )
weights.append(__lowerCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__lowerCamelCase , device=self.device ),
}
def __UpperCAmelCase ( self : Any , __lowerCamelCase : int , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Any=True , __lowerCamelCase : int=True , __lowerCamelCase : List[Any]=None , ) -> Dict:
if image_path:
a = self._get_latent(__lowerCamelCase )
else:
a = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
assert pos_prompts, "You must provide at least one positive prompt."
a = self.process_prompts(__lowerCamelCase )
a = self.process_prompts(__lowerCamelCase )
if save_final and save_path is None:
a = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) )
if not os.path.exists(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
else:
a = save_path + "_" + get_timestamp()
os.makedirs(__lowerCamelCase )
a = save_path
a = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("Original Image" )
show_pil(custom_to_pil(__lowerCamelCase ) )
a = loop_post_process(__lowerCamelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) ):
if show_intermediate:
show_pil(__lowerCamelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({"Image": wandb.Image(__lowerCamelCase )} )
if show_final:
show_pil(__lowerCamelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
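# Summary note (added for readability, not part of the original source): generate()
# is meant to encode the starting image into a VQGAN latent, let _optimize_CLIP()
# learn an additive latent vector with Adam so the decoded image moves towards the
# positive prompts and away from the negative ones under CLIP similarity, and
# optionally save each intermediate frame so the GIF helper above can stitch the
# frames into an animation.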
| 662 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset)
def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ):
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A, info=A, split=A, axis=A )
else:
return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
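# A short, self-contained usage sketch (an illustrative addition, not part of this
# module): downstream code normally reaches these helpers through the public
# `datasets` API. The toy columns below are made up.
if __name__ == "__main__":
    from datasets import Dataset, concatenate_datasets, interleave_datasets

    ds_a = Dataset.from_dict({"text": ["a", "b", "c"]})
    ds_b = Dataset.from_dict({"text": ["x", "y"]})

    # Alternate between sources until the smaller one is exhausted.
    mixed = interleave_datasets([ds_a, ds_b], stopping_strategy="first_exhausted")
    # Stack all rows from both sources into a single dataset.
    joined = concatenate_datasets([ds_a, ds_b])
    print(len(mixed), len(joined))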
| 662 | 1 |
from __future__ import annotations
import time
import numpy as np
__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
return {self.__need().index(__lowerCamelCase ): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
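# Illustrative sketch (added for clarity, not part of the original module): the
# sequence-pair helpers above lay out inputs as [CLS] A [SEP] B [SEP], with token
# type ids 0 over the first block and 1 over the second. The ids below are made up.
if __name__ == "__main__":
    cls_id, sep_id = 65, 66
    ids_a, ids_b = [10, 11, 12], [20, 21]
    pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    assert len(pair) == len(token_type_ids)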
| 662 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Tuple ) -> Tuple:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
a = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
a = "sshleifer/tiny-gpt2"
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : int ) -> List[str]:
a = "sgugger/tiny-distilbert-classification"
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : Any ) -> int:
a = "sshleifer/tiny-gpt2"
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
a = "sshleifer/tiny-gpt2"
a = AutoConfig.from_pretrained(__lowerCamelCase )
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase , [config] )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
a = "sshleifer/tiny-gpt2"
a = AutoConfig.from_pretrained(__lowerCamelCase )
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase , [config] )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
a = "sshleifer/tiny-gpt2"
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
a = "sshleifer/tiny-gpt2"
a = AutoConfig.from_pretrained(__lowerCamelCase )
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase , [config] )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self : Dict ) -> Dict:
a = "patrickvonplaten/t5-tiny-random"
a = AutoConfig.from_pretrained(__lowerCamelCase )
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase , configs=[config] )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = "sshleifer/tiny-gpt2"
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__lowerCamelCase , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
a = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv" ) , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv" ) , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv" ) ).exists() )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
a = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase : Optional[int] ):
self.assertTrue(hasattr(__lowerCamelCase , "sequential" ) )
self.assertTrue(hasattr(__lowerCamelCase , "cumulative" ) )
self.assertTrue(hasattr(__lowerCamelCase , "current" ) )
self.assertTrue(hasattr(__lowerCamelCase , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt" ) , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt" ) ).exists() )
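# Minimal stand-alone sketch of the benchmark API exercised by the tests above
# (assumes TensorFlow is installed; "sshleifer/tiny-gpt2" is reused from the tests).
if __name__ == "__main__":
    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

    benchmark_args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    results = TensorFlowBenchmark(benchmark_args).run()
    print(results.time_inference_result)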
| 662 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def parse_args():
    """Parse command-line arguments for the TFRecord preparation script."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training dataset. Explore datasets at: hf.co/datasets.", )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.", )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.", )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.", )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Return a closure that tokenizes the "text" column of a batch of examples."""
    def fn(examples):
        return tokenizer(examples["text"])
    return fn
def get_serialized_examples(tokenized_data):
    """Serialize tokenized samples into `tf.train.Example` byte strings."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=feature)
        example = tf.train.Example(features=features)
        serialized = example.SerializeToString()
        records.append(serialized)
    return records
def main(args):
'''simple docstring'''
a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
a = min(len(A ), args.limit )
a = dataset.select(range(A ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a = os.path.join(args.output_dir, args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
a = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
a = tokenize_function(A )
a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A : List[Any] ):
# Concatenate all texts.
a = {k: sum(examples[k], [] ) for k in examples.keys()}
a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a = {
k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 )
a = 0
a = 0
for shard in range(0, len(A ), args.shard_size ):
a = grouped_dataset[shard : shard + args.shard_size]
a = len(dataset_snapshot["input_ids"] )
a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
a = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
a = serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A, A ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
print(F"""Total {args.split} records: {total_records}""", file=A )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = parse_args()
main(args)
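# Hypothetical readback sketch (not part of the original script): parse one of the
# shards written above with tf.data. The feature names match the serialization in
# get_serialized_examples(); the shard path is a placeholder to fill in.
def read_shard(path):
    feature_spec = {
        "input_ids": tf.io.VarLenFeature(tf.int64),
        "attention_mask": tf.io.VarLenFeature(tf.int64),
    }
    raw_records = tf.data.TFRecordDataset([path])
    return raw_records.map(lambda record: tf.io.parse_single_example(record, feature_spec))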
| 662 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def __magic_name__ ( A : Optional[Any], A : str=False, A : List[Any]=False ):
'''simple docstring'''
a = "backbone." if is_semantic else ""
a = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(F"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(F"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(F"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __magic_name__ ( A : str, A : List[Any], A : Union[str, Any]=False, A : str=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
a = "backbone." if is_semantic else ""
# queries, keys and values
a = state_dict.pop(F"""{prefix}blocks.{i}.attn.qkv.weight""" )
a = state_dict.pop(F"""{prefix}blocks.{i}.attn.q_bias""" )
a = state_dict.pop(F"""{prefix}blocks.{i}.attn.v_bias""" )
a = in_proj_weight[
: config.hidden_size, :
]
a = q_bias
a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a = in_proj_weight[
-config.hidden_size :, :
]
a = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
a = state_dict.pop(F"""{prefix}blocks.{i}.gamma_1""" )
a = state_dict.pop(F"""{prefix}blocks.{i}.gamma_2""" )
a = gamma_a
a = gamma_a
def __magic_name__ ( A : Dict, A : Union[str, Any], A : List[Any] ):
'''simple docstring'''
a = dct.pop(A )
a = val
def __magic_name__ ( ):
'''simple docstring'''
a = "http://images.cocodataset.org/val2017/000000039769.jpg"
a = Image.open(requests.get(A, stream=A ).raw )
return im
@torch.no_grad()
def __magic_name__ ( A : Optional[int], A : List[Any], A : Optional[int]=False ):
'''simple docstring'''
a = False if "rvlcdip" in checkpoint_url else True
a = BeitConfig(use_absolute_position_embeddings=A, use_mask_token=A )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
a = 1024
a = 4096
a = 24
a = 16
# labels
if "rvlcdip" in checkpoint_url:
a = 16
a = "huggingface/label-files"
a = "rvlcdip-id2label.json"
a = json.load(open(hf_hub_download(A, A, repo_type="dataset" ), "r" ) )
a = {int(A ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
a = torch.hub.load_state_dict_from_url(A, map_location="cpu" )["model"]
a = create_rename_keys(A, has_lm_head=A )
for src, dest in rename_keys:
rename_key(A, A, A )
read_in_q_k_v(A, A, has_lm_head=A )
# load HuggingFace model
a = BeitForMaskedImageModeling(A ) if has_lm_head else BeitForImageClassification(A )
model.eval()
model.load_state_dict(A )
# Check outputs on an image
a = BeitImageProcessor(
size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=A )
a = prepare_img()
a = image_processor(images=A, return_tensors="pt" )
a = encoding["pixel_values"]
a = model(A )
a = outputs.logits
# verify logits
a = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(A ), "Shape of logits not as expected"
Path(A ).mkdir(exist_ok=A )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A )
if push_to_hub:
if has_lm_head:
a = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
a = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(A, A ), organization="nielsr", commit_message="Add image processor", use_temp_dir=A, )
model.push_to_hub(
repo_path_or_name=Path(A, A ), organization="nielsr", commit_message="Add model", use_temp_dir=A, )
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
__lowerCAmelCase : int = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
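    # Hypothetical follow-up (not part of the original conversion script): the freshly
    # written folder loads back like any other BEiT checkpoint, which is a cheap sanity
    # check that the conversion produced a readable preprocessor config.
    if args.pytorch_dump_folder_path is not None:
        reloaded_processor = BeitImageProcessor.from_pretrained(args.pytorch_dump_folder_path)
        print("Reloaded image processor:", type(reloaded_processor).__name__)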
| 662 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one sample and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
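# Quick illustration (a made-up snippet, not part of the original pipeline) of the
# characters-per-token ratio that tokenize() records for every sample.
_sample = "def add(a, b):\n    return a + b\n"
print("chars per token on a toy snippet:", len(_sample) / len(tokenizer(_sample)["input_ids"]))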
| 662 | 1 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: repeatedly take the largest denomination that still fits."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Use this denomination while it still fits into the remaining value
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
        n = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(F'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
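    # Quick sanity check of the greedy routine above (hypothetical values that are
    # not part of the original prompt flow): 18 decomposes into [10, 5, 2, 1].
    assert find_minimum_change([1, 2, 5, 10], "18") == [10, 5, 2, 1]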
| 662 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]:
a = tokenizer
a = dataset
a = len(__lowerCamelCase ) if n_tasks is None else n_tasks
a = n_copies
def __iter__( self : Tuple ) -> str:
a = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]:
a = start_length
a = eof_strings
a = tokenizer
def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__lowerCamelCase )
def remove_last_block(string):
    """Drop the last top-level stop string (see EOF_STRINGS) and everything after it."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ):
'''simple docstring'''
a = defaultdict(A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(A ) ):
with torch.no_grad():
a = batch["ids"].shape[-1]
a = accelerator.unwrap_model(A ).generate(
input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A )
# each task is generated batch_size times
a = batch["task_id"].repeat(A )
a = accelerator.pad_across_processes(
A, dim=1, pad_index=tokenizer.pad_token_id )
a , a = accelerator.gather((generated_tokens, generated_tasks) )
a = generated_tokens.cpu().numpy()
a = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(A, A ):
gen_token_dict[task].append(A )
a = [[] for _ in range(A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A )
code_gens[task].append(remove_last_block(A ) )
return code_gens
def __magic_name__ ( ):
'''simple docstring'''
a = HfArgumentParser(A )
a = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a = "false"
if args.num_workers is None:
a = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a = Accelerator()
set_seed(args.seed, device_specific=A )
# Load model and tokenizer
a = AutoTokenizer.from_pretrained(args.model_ckpt )
a = tokenizer.eos_token
a = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ),
}
# Load evaluation dataset and metric
a = load_dataset("openai_humaneval" )
a = load_metric("code_eval" )
a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
a = args.n_samples // args.batch_size
a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a = DataLoader(A, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a = code_eval_metric.compute(references=[""], predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
a , a = accelerator.prepare(A, A )
a = complete_code(
A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, )
if accelerator.is_main_process:
a = []
for task in tqdm(range(A ) ):
a = human_eval["test"][task]["test"]
a = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
a , a = code_eval_metric.compute(
references=A, predictions=A, num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file, "w" ) as fp:
json.dump(A, A )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
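    # Small self-contained illustration (hypothetical completion text, added for
    # clarity) of how remove_last_block() truncates a generation at a top-level
    # stop string such as "\nclass".
    _demo = "    return a + b\n\nclass Foo:\n    pass\n"
    print(remove_last_block(_demo))  # keeps only the text before "\nclass"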
| 662 | 1 |
__lowerCAmelCase : List[Any] = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
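# Note added for readability (not part of the original __init__): every branch above
# follows the same pattern -- probe the optional backends, and if one is missing,
# star-import the matching dummy-object module so the public names still exist and
# only raise an informative error when they are actually used, e.g.:
# from diffusers import StableDiffusionPipeline      # always importable
# StableDiffusionPipeline.from_pretrained("...")     # fails here if torch/transformers are absent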
| 662 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 1 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints a report once in GitHub format with all the information that needs to be shared
# with others, and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a training run:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each experiment multiple times, e.g., 3 times via --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline (100%) and then compare the rest to
# it, as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__lowerCAmelCase : List[str] = float('nan')
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : Tuple ) -> str:
a = sys.stdout
a = open(__lowerCamelCase , "a" )
def __getattr__( self : Tuple , __lowerCamelCase : str ) -> Optional[int]:
return getattr(self.stdout , __lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> str:
self.stdout.write(__lowerCamelCase )
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M ) )
def __magic_name__ ( A : Optional[Any]=80, A : Optional[Any]=False ):
'''simple docstring'''
a = []
# deal with critical env vars
a = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
a = os.environ.get(A, A )
if val is not None:
cmd.append(F"""{key}={val}""" )
# python executable (not always needed if the script is executable)
a = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(A )
# now the normal args
cmd += list(map(shlex.quote, sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
a = []
a = ""
while len(A ) > 0:
current_line += F"""{cmd.pop(0 )} """
if len(A ) == 0 or len(A ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(A )
a = ""
return "\\\n".join(A )
def __magic_name__ ( A : str, A : Tuple ):
'''simple docstring'''
a = re.sub(R"[\\\n]+", " ", args.base_cmd )
# remove --output_dir if any and set our own
a = re.sub("--output_dir\s+[^\s]+", "", args.base_cmd )
args.base_cmd += F""" --output_dir {output_dir}"""
# ensure we have --overwrite_output_dir
a = re.sub("--overwrite_output_dir\s+", "", args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __magic_name__ ( A : List[Any], A : Optional[int], A : Optional[Any], A : Tuple, A : Dict, A : Any, A : Tuple ):
'''simple docstring'''
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0, 100 ) for k in metric_keys}, **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )}, )
a = subprocess.run(A, capture_output=A, text=A )
if verbose:
print("STDOUT", result.stdout )
print("STDERR", result.stderr )
# save the streams
a = variation.replace(" ", "-" )
with open(Path(A ) / F"""log.{prefix}.stdout.txt""", "w" ) as f:
f.write(result.stdout )
with open(Path(A ) / F"""log.{prefix}.stderr.txt""", "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F"""{output_dir}/all_results.json""", "r", encoding="utf-8" ) as f:
a = json.load(A )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __magic_name__ ( A : Optional[int], A : int, A : Dict, A : Tuple, A : Any, A : Optional[Any], A : int, A : Union[str, Any], A : str, A : Optional[int], ):
'''simple docstring'''
a = []
a = []
a = F"""{id}: {variation:<{longest_variation_len}}"""
a = F"""{preamble}: """
a = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(A ), desc=A, leave=A ):
a = process_run_single(
A, A, A, A, A, A, A )
a = single_run_metrics[target_metric_key]
if not math.isnan(A ):
metrics.append(A )
results.append(A )
outcome += "✓"
else:
outcome += "✘"
a = F"""\33[2K\r{outcome}"""
if len(A ) > 0:
a = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
a = round(mean_metrics[target_metric_key], 2 )
a = F"""{outcome} {mean_target}"""
if len(A ) > 1:
results_str += F""" {tuple(round(A, 2 ) for x in results )}"""
print(A )
a = variation
return mean_metrics
else:
print(A )
return {variation_key: variation, target_metric_key: nan}
def __magic_name__ ( ):
'''simple docstring'''
a = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F"""
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def __magic_name__ ( A : str, A : List[Any], A : int, A : Optional[Any], A : Dict ):
'''simple docstring'''
a = pd.DataFrame(A )
a = "variation"
a = "diff_%"
a = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
a = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(A ):
# as a fallback, use the minimal value as the sentinel
a = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(A ):
a = df.apply(
lambda A : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0, axis="columns", )
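# Worked example (values taken from the report in the header comment): with '--tf32 0' as the
# baseline (285.11 train samples per second), the '--tf32 1' row (342.09) gets
# diff_% = round(100 * (342.09 - 285.11) / 285.11) = 20, matching the table above.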
# re-order columns
a = [variation_key, target_metric_key, diff_key, *report_metric_keys]
a = df.reindex(A, axis="columns" ) # reorder cols
# capitalize
a = df.rename(str.capitalize, axis="columns" )
# make the cols as narrow as possible
a = df.rename(lambda A : c.replace("_", "<br>" ), axis="columns" )
a = df.rename(lambda A : c.replace("_", "\n" ), axis="columns" )
a = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=A, floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=A, floatfmt=".2f" )]
print("\n\n".join(A ) )
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd", default=A, type=A, required=A, help="Base cmd", )
parser.add_argument(
"--variations", default=A, type=A, nargs="+", required=A, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'", )
parser.add_argument(
"--base-variation", default=A, type=A, help="Baseline variation to compare to. if None the minimal target value will be used to compare against", )
parser.add_argument(
"--target-metric-key", default=A, type=A, required=A, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second", )
parser.add_argument(
"--report-metric-keys", default="", type=A, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples", )
parser.add_argument(
"--repeat-times", default=1, type=A, help="How many times to re-run each variation - an average will be reported", )
parser.add_argument(
"--output_dir", default="output_benchmark", type=A, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked", )
parser.add_argument(
"--verbose", default=A, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress", )
a = parser.parse_args()
a = args.output_dir
Path(A ).mkdir(exist_ok=A )
a = get_base_command(A, A )
# split each dimension into its --foo variations
a = [list(map(str.strip, re.split(R"\|", A ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
a = list(map(str.strip, map(" ".join, itertools.product(*A ) ) ) )
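# Illustrative expansion (mirrors the header example): the dimensions ['--tf32 0', '--tf32 1'] and
# ['', '--fp16', '--bf16'] combine into 2 x 3 = 6 variations, in this order:
# '--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16', '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16'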
a = max(len(A ) for x in variations )
# split wanted keys
a = args.report_metric_keys.split()
# capture prints into a log file for convenience
a = F"""benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt"""
print(F"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""" )
print(F"""and this script's output is also piped into {report_fn}""" )
a = Tee(A )
print(F"""\n*** Running {len(A )} benchmarks:""" )
print(F"""Base command: {" ".join(A )}""" )
a = "variation"
a = []
for id, variation in enumerate(tqdm(A, desc="Total completion: ", leave=A ) ):
a = base_cmd + variation.split()
results.append(
process_run(
id + 1, A, A, A, A, args.target_metric_key, A, args.repeat_times, A, args.verbose, ) )
process_results(A, args.target_metric_key, A, args.base_variation, A )
if __name__ == "__main__":
main()
| 662 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase : Optional[Any] = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Path , __lowerCamelCase : Union[str, None] = None , __lowerCamelCase : Union[List[str], None] = None , __lowerCamelCase : Union[str, List[str], None] = None , __lowerCamelCase : bool = True , ) -> int:
a = [file for file in os.listdir(__lowerCamelCase ) if os.path.isfile(os.path.join(__lowerCamelCase , __lowerCamelCase ) )]
if identifier is not None:
a = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
for n_ in n_identifier:
a = [file for file in files if n_ not in file]
else:
a = [file for file in files if n_identifier not in file]
a = ignore_files or []
ignore_files.append("__init__.py" )
a = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , __lowerCamelCase )
if only_modules:
a = file.split("." )[0]
try:
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = doctest.DocTestSuite(__lowerCamelCase )
a = unittest.TextTestRunner().run(__lowerCamelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""" )
else:
a = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def __UpperCAmelCase ( self : List[str] ) -> str:
a = Path("src/transformers" )
a = "modeling"
a = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(__lowerCamelCase , identifier=__lowerCamelCase , ignore_files=__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Any:
a = Path("src/transformers" )
a = "tokenization"
self.analyze_directory(__lowerCamelCase , identifier=__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
a = Path("src/transformers" )
a = "configuration"
self.analyze_directory(__lowerCamelCase , identifier=__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
a = Path("src/transformers" )
a = ["configuration", "modeling", "tokenization"]
self.analyze_directory(__lowerCamelCase , n_identifier=__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
a = Path("docs/source" )
a = ["favicon.ico"]
self.analyze_directory(__lowerCamelCase , ignore_files=__lowerCamelCase , only_modules=__lowerCamelCase )
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__lowerCAmelCase : int = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 662 | 1 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Dict , *__lowerCamelCase : Dict , **__lowerCamelCase : Dict ) -> None:
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : Dict = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : List[Any] = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 |
import math
import flax.linen as nn
import jax.numpy as jnp
def __magic_name__ ( A : jnp.ndarray, A : int, A : float = 1, A : float = 1, A : float = 1.0E4, A : bool = False, A : float = 1.0, ):
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
a = float(embedding_dim // 2 )
a = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
a = min_timescale * jnp.exp(jnp.arange(A, dtype=jnp.floataa ) * -log_timescale_increment )
a = jnp.expand_dims(A, 1 ) * jnp.expand_dims(A, 0 )
# scale embeddings
a = scale * emb
if flip_sin_to_cos:
a = jnp.concatenate([jnp.cos(A ), jnp.sin(A )], axis=1 )
else:
a = jnp.concatenate([jnp.sin(A ), jnp.cos(A )], axis=1 )
a = jnp.reshape(A, [jnp.shape(A )[0], embedding_dim] )
return signal
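# Illustrative usage (shapes only, assuming the helper is exposed as `get_sinusoidal_embeddings`,
# the name invoked by the modules below): a (4,) array of timesteps with embedding_dim=32 yields a
# (4, 32) array laid out as [sin | cos] features, or [cos | sin] when flip_sin_to_cos=True.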
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCamelCase )
a = nn.silu(__lowerCamelCase )
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCamelCase )
return temb
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : float = 1
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]:
return get_sinusoidal_embeddings(
__lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 662 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class snake_case__ (_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = """dinat"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[Any] , __lowerCamelCase : int=4 , __lowerCamelCase : int=3 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Any=[3, 4, 6, 5] , __lowerCamelCase : int=[2, 4, 8, 16] , __lowerCamelCase : Optional[int]=7 , __lowerCamelCase : Any=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , __lowerCamelCase : Dict=3.0 , __lowerCamelCase : Any=True , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : Union[str, Any]=1e-5 , __lowerCamelCase : Any=0.0 , __lowerCamelCase : List[str]=None , __lowerCamelCase : Any=None , **__lowerCamelCase : List[Any] , ) -> Union[str, Any]:
super().__init__(**__lowerCamelCase )
a = patch_size
a = num_channels
a = embed_dim
a = depths
a = len(__lowerCamelCase )
a = num_heads
a = kernel_size
a = dilations
a = mlp_ratio
a = qkv_bias
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = drop_path_rate
a = hidden_act
a = layer_norm_eps
a = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a = int(embed_dim * 2 ** (len(__lowerCamelCase ) - 1) )
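# e.g. with the default embed_dim=64 and 4 stages (depths=[3, 4, 6, 5]), this gives 64 * 2**3 = 512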
a = layer_scale_init_value
a = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(__lowerCamelCase ) + 1 )]
a , a = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
| 662 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 662 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : Any = logging.get_logger(__name__)
def __magic_name__ ( A : List[Any], A : List[Any], A : Any ):
'''simple docstring'''
a = UniSpeechSatForSequenceClassification.from_pretrained(A, config=A )
a = downstream_dict["projector.weight"]
a = downstream_dict["projector.bias"]
a = downstream_dict["model.post_net.linear.weight"]
a = downstream_dict["model.post_net.linear.bias"]
return model
def __magic_name__ ( A : Any, A : Tuple, A : List[str] ):
'''simple docstring'''
a = UniSpeechSatForAudioFrameClassification.from_pretrained(A, config=A )
a = downstream_dict["model.linear.weight"]
a = downstream_dict["model.linear.bias"]
return model
def __magic_name__ ( A : str, A : Optional[Any], A : List[str] ):
'''simple docstring'''
a = UniSpeechSatForXVector.from_pretrained(A, config=A )
a = downstream_dict["connector.weight"]
a = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __magic_name__ ( A : List[Any], A : Optional[Any], A : int, A : List[Any] ):
'''simple docstring'''
a = torch.load(A, map_location="cpu" )
a = checkpoint["Downstream"]
a = UniSpeechSatConfig.from_pretrained(A )
a = WavaVecaFeatureExtractor.from_pretrained(
A, return_attention_mask=A, do_normalize=A )
a = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a = convert_classification(A, A, A )
elif arch.endswith("ForAudioFrameClassification" ):
a = convert_diarization(A, A, A )
elif arch.endswith("ForXVector" ):
a = convert_xvector(A, A, A )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(A )
hf_model.save_pretrained(A )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
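# Example invocation (script name, checkpoint paths and model id below are illustrative only):
# python convert_s3prl_checkpoint.py \
# --base_model_name microsoft/unispeech-sat-base-plus \
# --config_path ./classifier_config.json \
# --checkpoint_path ./s3prl_downstream.ckpt \
# --model_dump_path ./converted_model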
| 662 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0]
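# e.g. a file named "beagle_32.jpg" yields the label "beagle" (everything before the trailing index)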
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
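# OneCycleLR warms the learning rate up from max_lr / div_factor (= lr / 25 with PyTorch's default
# div_factor, matching the Adam initialization above) to max_lr=lr, then anneals it back down over
# num_epochs * steps_per_epoch total steps.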
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
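# Usage sketch: a training script like this is normally started through the `accelerate`
# CLI rather than plain `python`; the script filename below is a placeholder.
#
#   accelerate launch cv_example.py --data_dir ./images --with_tracking --checkpointing_steps epoch
#
# `--data_dir` is the only required argument defined above; the other flags are optional.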
| 662 | 1 |
import operator
def __magic_name__ ( A : list, A : bool = False, A : list | None = None ):
'''simple docstring'''
a = operator.lt if reverse else operator.gt
a = solution or []
if not arr:
return solution
a = [arr.pop(0 )]
for i, item in enumerate(A ):
if _operator(A, sublist[-1] ):
sublist.append(A )
arr.pop(A )
# merging sublist into solution list
if not solution:
solution.extend(A )
else:
while sublist:
a = sublist.pop(0 )
for i, xx in enumerate(A ):
if not _operator(A, A ):
solution.insert(A, A )
break
else:
solution.append(A )
strand_sort(A, A, A )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
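# How the strands form for the first assert above: the initial pass pulls the increasing
# strand [4, 5] out of [4, 3, 5, 1, 2], leaving [3, 1, 2]; the recursive calls then extract
# [3] and [1, 2] and merge each strand into `solution`, producing [1, 2, 3, 4, 5].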
| 662 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
__lowerCAmelCase : Dict = '|'.join(sys.argv[1:])
__lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 662 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
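# Usage sketch, assuming this class is exposed under its usual name `BigBirdTokenizerFast`:
#
#   tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   ids = tok("Hello world")["input_ids"]   # [CLS] ... [SEP] added by the special-token logic above
#
# The checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above.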
| 662 |
def __magic_name__ ( A : int, A : int, A : int ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
a = _modexpt(A, exponent // 2, A ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A, exponent - 1, A )) % modulo_value
def __magic_name__ ( A : int = 1777, A : int = 1855, A : int = 8 ):
'''simple docstring'''
a = base
for _ in range(1, A ):
a = _modexpt(A, A, 10**digits )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
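# Worked example: `_modexpt` is modular exponentiation by squaring, so _modexpt(3, 5, 7)
# computes 3**5 % 7 = 243 % 7 = 5 without building huge intermediate integers. `solution()`
# iterates it to keep only the last `digits` digits of the power tower base^base^...^base.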
| 662 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : int = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
__lowerCAmelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def __magic_name__ ( A : str ):
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
a = model_type_to_module_name(A )
a = importlib.import_module(F""".{module_name}""", "transformers.models" )
try:
return getattr(A, A )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(A, "__name__", A ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dependency is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
a = importlib.import_module("transformers" )
if hasattr(A, A ):
return getattr(A, A )
return None
def __magic_name__ ( A : Union[str, os.PathLike], A : Optional[Union[str, os.PathLike]] = None, A : bool = False, A : bool = False, A : Optional[Dict[str, str]] = None, A : Optional[Union[bool, str]] = None, A : Optional[str] = None, A : bool = False, **A : str, ):
'''simple docstring'''
a = get_file_from_repo(
A, A, cache_dir=A, force_download=A, resume_download=A, proxies=A, use_auth_token=A, revision=A, local_files_only=A, )
if resolved_config_file is None:
logger.info(
"Could not locate the feature extractor configuration file, will try to use the model config instead." )
return {}
with open(A, encoding="utf-8" ) as reader:
return json.load(A )
class snake_case__ :
"""simple docstring"""
def __init__( self : Optional[int] ) -> Optional[Any]:
raise EnvironmentError(
"AutoFeatureExtractor is designed to be instantiated "
"using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(__lowerCamelCase )
def __UpperCAmelCase ( cls : str , __lowerCamelCase : Dict , **__lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
a = kwargs.pop("config" , __lowerCamelCase )
a = kwargs.pop("trust_remote_code" , __lowerCamelCase )
a = True
a , a = FeatureExtractionMixin.get_feature_extractor_dict(__lowerCamelCase , **__lowerCamelCase )
a = config_dict.get("feature_extractor_type" , __lowerCamelCase )
a = None
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
a = config_dict["auto_map"]["AutoFeatureExtractor"]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
a = AutoConfig.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
# It could be in `config.feature_extractor_type``
a = getattr(__lowerCamelCase , "feature_extractor_type" , __lowerCamelCase )
if hasattr(__lowerCamelCase , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map:
a = config.auto_map["AutoFeatureExtractor"]
if feature_extractor_class is not None:
a = feature_extractor_class_from_name(__lowerCamelCase )
a = feature_extractor_auto_map is not None
a = feature_extractor_class is not None or type(__lowerCamelCase ) in FEATURE_EXTRACTOR_MAPPING
a = resolve_trust_remote_code(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if has_remote_code and trust_remote_code:
a = get_class_from_dynamic_module(
__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
a = kwargs.pop("code_revision" , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(__lowerCamelCase ) in FEATURE_EXTRACTOR_MAPPING:
a = FEATURE_EXTRACTOR_MAPPING[type(__lowerCamelCase )]
return feature_extractor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
raise ValueError(
f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[str] ) -> Optional[int]:
FEATURE_EXTRACTOR_MAPPING.register(__lowerCamelCase , __lowerCamelCase )
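# Usage sketch, assuming this class is exposed under its usual name `AutoFeatureExtractor`;
# the mapping above is what resolves a checkpoint to a concrete feature extractor class.
# The checkpoint name is only an example:
#
#   fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   # resolves to Wav2Vec2FeatureExtractor via the ('wav2vec2', 'Wav2Vec2FeatureExtractor') entry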
| 662 |
def __magic_name__ ( A : str, A : str ):
'''simple docstring'''
def get_matched_characters(A : str, A : str ) -> str:
a = []
a = min(len(_stra ), len(_stra ) ) // 2
for i, l in enumerate(_stra ):
a = int(max(0, i - limit ) )
a = int(min(i + limit + 1, len(_stra ) ) )
if l in _stra[left:right]:
matched.append(A )
a = F"""{_stra[0:_stra.index(A )]} {_stra[_stra.index(A ) + 1:]}"""
return "".join(A )
# matching characters
a = get_matched_characters(A, A )
a = get_matched_characters(A, A )
a = len(A )
# transposition
a = (
len([(ca, ca) for ca, ca in zip(A, A ) if ca != ca] ) // 2
)
if not match_count:
a = 0.0
else:
a = (
1
/ 3
* (
match_count / len(A )
+ match_count / len(A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a = 0
for ca, ca in zip(stra[:4], stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
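# Worked example: for the classic pair ("martha", "marhta") all six characters match, there
# is one transposition (th/ht) and a common prefix of length 3, so the Jaro score is
# (6/6 + 6/6 + 5/6) / 3 ≈ 0.9444 and jaro_winkler("martha", "marhta") ≈ 0.9611.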
| 662 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = """openai-gpt"""
SCREAMING_SNAKE_CASE_ : str = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : str , __lowerCamelCase : str=4_04_78 , __lowerCamelCase : Tuple=5_12 , __lowerCamelCase : Any=7_68 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : List[str]=12 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Union[str, Any]=1e-5 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Dict="cls_index" , __lowerCamelCase : int=True , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=True , __lowerCamelCase : Optional[Any]=0.1 , **__lowerCamelCase : Union[str, Any] , ) -> List[Any]:
a = vocab_size
a = n_positions
a = n_embd
a = n_layer
a = n_head
a = afn
a = resid_pdrop
a = embd_pdrop
a = attn_pdrop
a = layer_norm_epsilon
a = initializer_range
a = summary_type
a = summary_use_proj
a = summary_activation
a = summary_first_dropout
a = summary_proj_to_labels
super().__init__(**__lowerCamelCase )
| 662 |
__lowerCAmelCase : List[Any] = {str(digit): digit**5 for digit in range(10)}
def __magic_name__ ( A : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
def __magic_name__ ( ):
'''simple docstring'''
return sum(
number
for number in range(1000, 1000000 )
if number == digits_fifth_powers_sum(A ) )
if __name__ == "__main__":
print(solution())
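# Worked example: 4150 is one of the numbers the search above finds, because
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150, i.e. the number equals the sum
# of the fifth powers of its own digits.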
| 662 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class snake_case__ :
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : int ) -> None:
a = value
a = None
a = None
class snake_case__ :
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : Node ) -> None:
a = tree
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Node | None ) -> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : List[Any] ) -> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
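# Worked example: with a root holding 10, a left child holding 5 and a right child holding -3,
# iterating the traversal object yields the single value 10 + 5 + (-3) = 12, since the recursive
# depth-first sum counts every node once and missing children contribute 0.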
| 662 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
a = do_rescale
a = rescale_factor
a = do_pad
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]:
if not batched:
a = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size["shortest_edge"] * h / w )
a = self.size["shortest_edge"]
elif w > h:
a = self.size["shortest_edge"]
a = int(self.size["shortest_edge"] * w / h )
else:
a = self.size["shortest_edge"]
a = self.size["shortest_edge"]
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 662 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = """Speech2TextFeatureExtractor"""
SCREAMING_SNAKE_CASE_ : Tuple = """Speech2TextTokenizer"""
def __init__( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> Dict:
super().__init__(__lowerCamelCase , __lowerCamelCase )
a = self.feature_extractor
a = False
def __call__( self : str , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Dict ) -> str:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowerCamelCase , **__lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
a = kwargs.pop("raw_speech" )
else:
a = kwargs.pop("audio" , __lowerCamelCase )
a = kwargs.pop("sampling_rate" , __lowerCamelCase )
a = kwargs.pop("text" , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
a = args[0]
a = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
a = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase )
if text is not None:
a = self.tokenizer(__lowerCamelCase , **__lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
a = encodings["input_ids"]
return inputs
def __UpperCAmelCase ( self : Tuple , *__lowerCamelCase : int , **__lowerCamelCase : List[str] ) -> Dict:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Tuple:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@contextmanager
def __UpperCAmelCase ( self : int ) -> Any:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
a = True
a = self.tokenizer
yield
a = self.feature_extractor
a = False
| 662 |
def __magic_name__ ( A : list ):
'''simple docstring'''
for i in range(len(A ) - 1, 0, -1 ):
a = False
for j in range(A, 0, -1 ):
if unsorted[j] < unsorted[j - 1]:
a , a = unsorted[j - 1], unsorted[j]
a = True
for j in range(A ):
if unsorted[j] > unsorted[j + 1]:
a , a = unsorted[j + 1], unsorted[j]
a = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
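# Worked example: each pass sweeps the list in both directions, so an input such as
# 4, 5, 2, 1, 2 is returned as [1, 2, 2, 4, 5]; the `swapped` flag stops early once a
# full forward and backward pass makes no exchange.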
| 662 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A : Tuple, A : List[str], A : int ):
'''simple docstring'''
a = LxmertConfig.from_json_file(A )
print(F"""Building PyTorch model from configuration: {config}""" )
a = LxmertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A, A, A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict(), A )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
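# Usage sketch (the script filename and paths are placeholders):
#
#   python convert_lxmert_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/lxmert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin
#
# The three flags match the required arguments registered with argparse above.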
| 662 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase )
}
| 662 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
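# Usage sketch, assuming this class is exposed under its usual name `ReformerTokenizer`:
#
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   pieces = tok.tokenize("Crime and Punishment")   # SentencePiece pieces prefixed with '▁'
#
# The checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above.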
| 662 |
import argparse
import os
import re
__lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __magic_name__ ( A : int, A : bool = False ):
'''simple docstring'''
with open(A, "r", encoding="utf-8" ) as f:
a = f.read()
a = content.split("\n" )
a = []
a = 0
while line_idx < len(A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(A, "w", encoding="utf-8" ) as f:
f.write("\n".join(A ) )
elif "\n".join(A ) != content:
return True
def __magic_name__ ( A : bool = False ):
'''simple docstring'''
a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )]
a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames]
if not overwrite and any(A ):
a = [f for f, d in zip(A, A ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 662 | 1 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : NestedDataStructureLike[PathLike] , __lowerCamelCase : Optional[NamedSplit] = None , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : Optional[int] , ) -> Optional[Any]:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
a = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
a = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# Build iterable dataset
if self.streaming:
a = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
a = None
a = None
a = None
a = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
a = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
| 662 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 662 | 1 |
from __future__ import annotations
import numpy as np
def __magic_name__ ( A : np.ndarray ):
'''simple docstring'''
a , a = np.shape(A )
if rows != columns:
a = (
"'table' has to be of square shaped array but got a "
F"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(A )
a = np.zeros((rows, columns) )
a = np.zeros((rows, columns) )
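    # Doolittle-style LU factorization: the lower factor gets a unit diagonal and the upper
    # factor holds the remaining coefficients, so that lower @ upper reproduces the input matrix.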
for i in range(A ):
for j in range(A ):
a = sum(lower[i][k] * upper[k][j] for k in range(A ) )
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists" )
a = (table[i][j] - total) / upper[j][j]
a = 1
for j in range(A, A ):
a = sum(lower[i][k] * upper[k][j] for k in range(A ) )
a = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
from __future__ import annotations
import time
import numpy as np
__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
return {self.__need().index(__lowerCamelCase ): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["""image_processor""", """tokenizer"""]
SCREAMING_SNAKE_CASE_ : int = """BlipImageProcessor"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
a = False
super().__init__(__lowerCamelCase , __lowerCamelCase )
a = self.image_processor
def __call__( self : List[str] , __lowerCamelCase : ImageInput = None , __lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[bool, str, PaddingStrategy] = False , __lowerCamelCase : Union[bool, str, TruncationStrategy] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[str, TensorType]] = None , **__lowerCamelCase : List[str] , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
a = self.tokenizer
a = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
return text_encoding
# add pixel_values
a = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
if text is not None:
a = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
else:
a = None
if text_encoding is not None:
encoding_image_processor.update(__lowerCamelCase )
return encoding_image_processor
def __UpperCAmelCase ( self : Optional[Any] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[Any] ) -> str:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] , *__lowerCamelCase : int , **__lowerCamelCase : str ) -> List[Any]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
a = self.tokenizer.model_input_names
a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 662 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset)
def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
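    # Every element must be a Dataset or an IterableDataset, and all elements must share the same type.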
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ):
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A, info=A, split=A, axis=A )
else:
return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
| 662 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCAmelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCAmelCase : List[str] = 12_8022
__lowerCAmelCase : Tuple = 12_8028
@require_sentencepiece
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = MaMaaaTokenizer
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
super().setUp()
a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = Path(self.tmpdirname )
save_json(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["spm_file"] )
a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : int ) -> int:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[Any] ) -> str:
return (
"This is a test",
"This is a test",
)
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
a = "</s>"
a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> Any:
a = self.get_tokenizer()
a = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(__lowerCamelCase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : int ) -> Tuple:
a = self.get_tokenizer()
a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [2, 3, 4, 5, 6] , )
a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
a = tokenizer.convert_tokens_to_string(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , "This is a test" )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
# fmt: off
a = {"input_ids": [[12_80_22, 11_01_08, 3_97, 11, 3_82_72, 22_47, 12_48_11, 2_85, 1_81_05, 15_86, 2_07, 7, 3_95_34, 44_28, 3_97, 10_19, 1_81_05, 15_86, 2_07, 7, 4_13_37, 1_67_86, 2_41, 7, 2_02_14, 17, 12_56_90, 1_03_98, 7, 4_43_78, 5_80_69, 6_83_42, 77_98, 73_43, 11, 2_99, 3_33_10, 4, 1_58, 3_73_50, 9_40_77, 45_69, 2_99, 3_33_10, 90, 4, 5_28_40, 2_90, 4, 3_12_70, 1_12, 2_99, 6_82, 4, 5_28_40, 3_99_53, 1_40_79, 1_93, 5_25_19, 9_08_94, 1_78_94, 12_06_97, 11, 4_04_45, 5_51, 17, 10_19, 5_25_19, 9_08_94, 1_77_56, 9_63, 11, 4_04_45, 4_80, 17, 97_92, 11_20, 51_73, 13_93, 62_40, 1_67_86, 2_41, 12_09_96, 28, 12_45, 13_93, 11_82_40, 1_11_23, 10_19, 9_36_12, 26_91, 1_06_18, 9_80_58, 12_04_09, 19_28, 2_79, 4, 4_06_83, 3_67, 1_78, 2_07, 10_19, 1_03, 10_31_21, 5_06, 6_52_96, 5, 2], [12_80_22, 2_12_17, 3_67, 1_17, 12_54_50, 1_28, 7_19, 7, 73_08, 40, 9_36_12, 1_26_69, 11_16, 1_67_04, 71, 1_77_85, 36_99, 1_55_92, 35, 1_44, 95_84, 2_41, 1_19_43, 7_13, 9_50, 7_99, 22_47, 8_84_27, 1_50, 1_49, 11_88_13, 12_07_06, 10_19, 10_69_06, 8_15_18, 28, 12_24, 2_27_99, 3_97, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_80_22, 16_58, 12_33_11, 51_55, 55_78, 47_22, 2_79, 1_49_47, 23_66, 11_20, 11_97, 14, 13_48, 92_32, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = """facebook/m2m100_418M"""
SCREAMING_SNAKE_CASE_ : List[str] = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
SCREAMING_SNAKE_CASE_ : Tuple = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def __UpperCAmelCase ( cls : int ) -> Any:
a = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
a = 1
return cls
def __UpperCAmelCase ( self : Any ) -> str:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 12_80_06 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 12_80_22 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 12_80_76 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 12_80_63 )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
a = self.tokenizer.get_vocab()
self.assertEqual(len(__lowerCamelCase ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
a = "en"
a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids )
# fmt: off
a = [FR_CODE, 53_64, 82, 86_42, 4, 2_94, 47, 8, 1_40_28, 1_36, 32_86, 97_06, 6, 9_07_97, 6, 14_40_12, 1_62, 8_81_28, 3_00_61, 5, 2]
# fmt: on
a = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
a = tempfile.mkdtemp()
a = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(__lowerCamelCase )
a = MaMaaaTokenizer.from_pretrained(__lowerCamelCase )
self.assertDictEqual(new_tok.lang_token_to_id , __lowerCamelCase )
@require_torch
def __UpperCAmelCase ( self : int ) -> List[str]:
a = "en"
a = "fr"
a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , return_tensors="pt" )
a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , {
# en_XX, A, test, EOS
"input_ids": [[12_80_22, 58, 41_83, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 12_80_06,
} , )
| 662 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
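        # single sequence: [CLS] X [SEP] ; pair of sequences: [CLS] A [SEP] B [SEP]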
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
| 662 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : int = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """deformable_detr"""
SCREAMING_SNAKE_CASE_ : str = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Tuple , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=3 , __lowerCamelCase : int=3_00 , __lowerCamelCase : Optional[Any]=10_24 , __lowerCamelCase : int=6 , __lowerCamelCase : Optional[Any]=10_24 , __lowerCamelCase : List[str]=8 , __lowerCamelCase : Any=6 , __lowerCamelCase : Optional[Any]=10_24 , __lowerCamelCase : Dict=8 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]="relu" , __lowerCamelCase : Optional[int]=2_56 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : int=0.0 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Optional[Any]=1.0 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : Optional[Any]="sine" , __lowerCamelCase : int="resnet50" , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Any=4 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Tuple=False , __lowerCamelCase : List[str]=3_00 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=5 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Dict=1 , __lowerCamelCase : Optional[int]=5 , __lowerCamelCase : str=2 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Optional[Any]=0.25 , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Optional[int] , ) -> Any:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
a = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
a = backbone_config.get("model_type" )
a = CONFIG_MAPPING[backbone_model_type]
a = config_class.from_dict(__lowerCamelCase )
a = use_timm_backbone
a = backbone_config
a = num_channels
a = num_queries
a = max_position_embeddings
a = d_model
a = encoder_ffn_dim
a = encoder_layers
a = encoder_attention_heads
a = decoder_ffn_dim
a = decoder_layers
a = decoder_attention_heads
a = dropout
a = attention_dropout
a = activation_dropout
a = activation_function
a = init_std
a = init_xavier_std
a = encoder_layerdrop
a = auxiliary_loss
a = position_embedding_type
a = backbone
a = use_pretrained_backbone
a = dilation
# deformable attributes
a = num_feature_levels
a = encoder_n_points
a = decoder_n_points
a = two_stage
a = two_stage_num_proposals
a = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
a = class_cost
a = bbox_cost
a = giou_cost
# Loss coefficients
a = mask_loss_coefficient
a = dice_loss_coefficient
a = bbox_loss_coefficient
a = giou_loss_coefficient
a = eos_coefficient
a = focal_alpha
a = disable_custom_kernels
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def __UpperCAmelCase ( self : List[Any] ) -> int:
return self.encoder_attention_heads
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
return self.d_model
def __UpperCAmelCase ( self : Dict ) -> Tuple:
a = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
a = self.backbone_config.to_dict()
a = self.__class__.model_type
return output
| 662 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
a = parser.parse_args()
return args
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
def fn(A : Tuple ):
return tokenizer(examples["text"] )
return fn
def __magic_name__ ( A : Any ):
'''simple docstring'''
a = []
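    # Wrap each tokenized sample in a tf.train.Example proto and serialize it to a byte string.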
for i in range(len(tokenized_data["input_ids"] ) ):
a = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
a = tf.train.Features(feature=A )
a = tf.train.Example(features=A )
a = example.SerializeToString()
records.append(A )
return records
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
a = min(len(A ), args.limit )
a = dataset.select(range(A ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a = os.path.join(args.output_dir, args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
a = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
a = tokenize_function(A )
a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A : List[Any] ):
# Concatenate all texts.
a = {k: sum(examples[k], [] ) for k in examples.keys()}
a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a = {
k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 )
a = 0
a = 0
for shard in range(0, len(A ), args.shard_size ):
a = grouped_dataset[shard : shard + args.shard_size]
a = len(dataset_snapshot["input_ids"] )
a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
a = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
a = serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A, A ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
print(F"""Total {args.split} records: {total_records}""", file=A )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = parse_args()
main(args)
| 662 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCAmelCase : Tuple = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def __magic_name__ ( A : Dict, A : List[str], A : List[Any] ):
'''simple docstring'''
a = state_dict.pop(A )
a = val
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
a = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
a = key.replace("backbone.0.body", "backbone.conv_encoder.model" )
a = value
else:
a = value
return new_state_dict
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
a = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
a = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
a = in_proj_weight[:256, :]
a = in_proj_bias[:256]
a = in_proj_weight[256:512, :]
a = in_proj_bias[256:512]
a = in_proj_weight[-256:, :]
a = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
a = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
a = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
a = in_proj_weight[:256, :]
a = in_proj_bias[:256]
a = in_proj_weight[256:512, :]
a = in_proj_bias[256:512]
a = in_proj_weight[-256:, :]
a = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
a = state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
a = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
a = in_proj_weight_cross_attn[:256, :]
a = in_proj_bias_cross_attn[:256]
a = in_proj_weight_cross_attn[256:512, :]
a = in_proj_bias_cross_attn[256:512]
a = in_proj_weight_cross_attn[-256:, :]
a = in_proj_bias_cross_attn[-256:]
def __magic_name__ ( A : List[Any], A : Optional[int] ):
'''simple docstring'''
a , a = image.size
a = max(A, A )
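    # Resize so the longer image side matches the target size (800 for detection checkpoints, 1000 otherwise).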
a = 800 if "detection" in checkpoint_url else 1000
a = target_max_size / current_max_size
a = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def __magic_name__ ( A : int ):
'''simple docstring'''
a = F.to_tensor(A )
a = F.normalize(A, mean=[0.4_85, 0.4_56, 0.4_06], std=[0.2_29, 0.2_24, 0.2_25] )
return image
@torch.no_grad()
def __magic_name__ ( A : Tuple, A : Dict, A : int ):
'''simple docstring'''
logger.info("Converting model..." )
# load original state dict
a = torch.hub.load_state_dict_from_url(A, map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(A, A, A )
a = rename_backbone_keys(A )
# query, key and value matrices need special treatment
read_in_q_k_v(A )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
a = state_dict.pop(A )
a = val
# create HuggingFace model and load state dict
a = TableTransformerConfig(
backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
if "detection" in checkpoint_url:
a = 15
a = 2
a = {0: "table", 1: "table rotated"}
a = idalabel
a = {v: k for k, v in idalabel.items()}
else:
a = 125
a = 6
a = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
a = idalabel
a = {v: k for k, v in idalabel.items()}
a = DetrImageProcessor(
format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000 )
a = TableTransformerForObjectDetection(A )
model.load_state_dict(A )
model.eval()
# verify our conversion
a = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
a = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=A )
a = Image.open(A ).convert("RGB" )
a = normalize(resize(A, A ) ).unsqueeze(0 )
a = model(A )
if "detection" in checkpoint_url:
a = (1, 15, 3)
a = torch.tensor(
[[-6.78_97, -16.99_85, 6.79_37], [-8.01_86, -22.21_92, 6.96_77], [-7.31_17, -21.07_08, 7.40_55]] )
a = torch.tensor([[0.48_67, 0.17_67, 0.67_32], [0.67_18, 0.44_79, 0.38_30], [0.47_16, 0.17_60, 0.63_64]] )
else:
a = (1, 125, 7)
a = torch.tensor(
[[-18.14_30, -8.32_14, 4.82_74], [-18.46_85, -7.13_61, -4.26_67], [-26.36_93, -9.34_29, -4.99_62]] )
a = torch.tensor([[0.49_83, 0.55_95, 0.94_40], [0.49_16, 0.63_15, 0.59_54], [0.61_08, 0.86_37, 0.11_35]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3], A, atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3], A, atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
a = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(A )
image_processor.push_to_hub(A )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 662 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
a = {}
a = tokenizer(example["content"], truncation=A )["input_ids"]
a = len(example["content"] ) / len(output["input_ids"] )
return output
__lowerCAmelCase : Dict = HfArgumentParser(PretokenizationArguments)
__lowerCAmelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase : List[Any] = multiprocessing.cpu_count()
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__lowerCAmelCase : List[Any] = time.time()
__lowerCAmelCase : str = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : int = time.time()
__lowerCAmelCase : Optional[int] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 662 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __magic_name__ ( A : Union[str, Any], A : Dict, A : Union[str, Any]=1E-12 ):
'''simple docstring'''
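    # L2-normalize both embedding matrices and return their pairwise cosine similarities.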
a = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(A, axis=1 ), a_min=A ) ).T
a = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(A, axis=1 ), a_min=A ) ).T
return jnp.matmul(A, norm_emb_a.T )
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : CLIPConfig
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
def __UpperCAmelCase ( self : int ) -> Tuple:
a = FlaxCLIPVisionModule(self.config.vision_config )
a = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype )
a = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
a = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
a = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
a = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self : Tuple , __lowerCamelCase : Optional[int] ) -> int:
a = self.vision_model(__lowerCamelCase )[1]
a = self.visual_projection(__lowerCamelCase )
a = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds )
a = jax_cosine_distance(__lowerCamelCase , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
a = 0.0
a = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
a = jnp.round(__lowerCamelCase , 3 )
a = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase )
# Use a lower threshold if an image has any special care concept
a = is_special_care * 0.01
a = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
a = jnp.round(__lowerCamelCase , 3 )
a = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPConfig
SCREAMING_SNAKE_CASE_ : Optional[Any] = """clip_input"""
SCREAMING_SNAKE_CASE_ : Any = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : List[Any] , __lowerCamelCase : CLIPConfig , __lowerCamelCase : Optional[Tuple] = None , __lowerCamelCase : int = 0 , __lowerCamelCase : jnp.dtype = jnp.floataa , __lowerCamelCase : bool = True , **__lowerCamelCase : Tuple , ) -> List[Any]:
if input_shape is None:
a = (1, 2_24, 2_24, 3)
a = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase )
super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : jax.random.KeyArray , __lowerCamelCase : Tuple , __lowerCamelCase : FrozenDict = None ) -> FrozenDict:
# init input tensor
a = jax.random.normal(__lowerCamelCase , __lowerCamelCase )
a , a = jax.random.split(__lowerCamelCase )
a = {"params": params_rng, "dropout": dropout_rng}
a = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"]
return random_params
def __call__( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : dict = None , ) -> Union[str, Any]:
a = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 662 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]:
a = tokenizer
a = dataset
a = len(__lowerCamelCase ) if n_tasks is None else n_tasks
a = n_copies
def __iter__( self : Tuple ) -> str:
a = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]:
a = start_length
a = eof_strings
a = tokenizer
def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__lowerCamelCase )
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
a = re.split("(%s)" % "|".join(A ), A )
# last string should be ""
return "".join(string_list[:-2] )
def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ):
'''simple docstring'''
a = defaultdict(A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(A ) ):
with torch.no_grad():
a = batch["ids"].shape[-1]
a = accelerator.unwrap_model(A ).generate(
input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A )
# each task is generated batch_size times
a = batch["task_id"].repeat(A )
a = accelerator.pad_across_processes(
A, dim=1, pad_index=tokenizer.pad_token_id )
a , a = accelerator.gather((generated_tokens, generated_tasks) )
a = generated_tokens.cpu().numpy()
a = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(A, A ):
gen_token_dict[task].append(A )
a = [[] for _ in range(A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A )
code_gens[task].append(remove_last_block(A ) )
return code_gens
def __magic_name__ ( ):
'''simple docstring'''
a = HfArgumentParser(A )
a = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a = "false"
if args.num_workers is None:
a = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a = Accelerator()
set_seed(args.seed, device_specific=A )
# Load model and tokenizer
a = AutoTokenizer.from_pretrained(args.model_ckpt )
a = tokenizer.eos_token
a = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ),
}
# Load evaluation dataset and metric
a = load_dataset("openai_humaneval" )
a = load_metric("code_eval" )
a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
a = args.n_samples // args.batch_size
a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a = DataLoader(A, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a = code_eval_metric.compute(references=[""], predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
a , a = accelerator.prepare(A, A )
a = complete_code(
A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, )
if accelerator.is_main_process:
a = []
for task in tqdm(range(A ) ):
a = human_eval["test"][task]["test"]
a = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
a , a = code_eval_metric.compute(
references=A, predictions=A, num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file, "w" ) as fp:
json.dump(A, A )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 662 | 1 |
def __magic_name__ ( A : list, A : list ):
'''simple docstring'''
_validate_point(A )
_validate_point(A )
if len(A ) != len(A ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(A, A ) ) )
def __magic_name__ ( A : list[float] ):
'''simple docstring'''
if point:
if isinstance(A, A ):
for item in point:
if not isinstance(A, (int, float) ):
a = (
"Expected a list of numbers as input, found "
F"""{type(A ).__name__}"""
)
raise TypeError(A )
else:
a = F"""Expected a list of numbers as input, found {type(A ).__name__}"""
raise TypeError(A )
else:
raise ValueError("Missing an input" )
def __magic_name__ ( A : list, A : list ):
'''simple docstring'''
_validate_point(A )
_validate_point(A )
if len(A ) != len(A ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(A, A ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : Any = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = """sew-d"""
def __init__( self : Tuple , __lowerCamelCase : Optional[int]=32 , __lowerCamelCase : int=7_68 , __lowerCamelCase : Union[str, Any]=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : Any=30_72 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : str=5_12 , __lowerCamelCase : Any=2_56 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=True , __lowerCamelCase : int=("p2c", "c2p") , __lowerCamelCase : List[Any]="layer_norm" , __lowerCamelCase : Any="gelu_python" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : Optional[int]=1e-7 , __lowerCamelCase : str=1e-5 , __lowerCamelCase : Union[str, Any]="group" , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Union[str, Any]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , __lowerCamelCase : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __lowerCamelCase : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Dict=1_28 , __lowerCamelCase : Union[str, Any]=16 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=0.05 , __lowerCamelCase : List[str]=10 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[Any]=10 , __lowerCamelCase : str=0 , __lowerCamelCase : Any="mean" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Any=2_56 , __lowerCamelCase : Tuple=0 , __lowerCamelCase : Dict=1 , __lowerCamelCase : Optional[int]=2 , **__lowerCamelCase : Optional[int] , ) -> Optional[int]:
super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase )
a = hidden_size
a = feat_extract_norm
a = feat_extract_activation
a = list(__lowerCamelCase )
a = list(__lowerCamelCase )
a = list(__lowerCamelCase )
a = conv_bias
a = num_conv_pos_embeddings
a = num_conv_pos_embedding_groups
a = len(self.conv_dim )
a = num_hidden_layers
a = intermediate_size
a = squeeze_factor
a = max_position_embeddings
a = position_buckets
a = share_att_key
a = relative_attention
a = norm_rel_ebd
a = list(__lowerCamelCase )
a = hidden_act
a = num_attention_heads
a = hidden_dropout
a = attention_dropout
a = activation_dropout
a = feat_proj_dropout
a = final_dropout
a = layer_norm_eps
a = feature_layer_norm_eps
a = initializer_range
a = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a = apply_spec_augment
a = mask_time_prob
a = mask_time_length
a = mask_time_min_masks
a = mask_feature_prob
a = mask_feature_length
a = mask_feature_min_masks
# ctc loss
a = ctc_loss_reduction
a = ctc_zero_infinity
# sequence classification
a = use_weighted_layer_sum
a = classifier_proj_size
@property
def __UpperCAmelCase ( self : List[str] ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 662 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Union[str, Any] = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__lowerCAmelCase : int = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 662 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """mobilenet_v1"""
def __init__( self : Tuple , __lowerCamelCase : Any=3 , __lowerCamelCase : Any=2_24 , __lowerCamelCase : Union[str, Any]=1.0 , __lowerCamelCase : List[str]=8 , __lowerCamelCase : List[Any]="relu6" , __lowerCamelCase : int=True , __lowerCamelCase : str=0.999 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : List[Any]=0.001 , **__lowerCamelCase : Union[str, Any] , ) -> List[str]:
super().__init__(**__lowerCamelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
a = num_channels
a = image_size
a = depth_multiplier
a = min_depth
a = hidden_act
a = tf_padding
a = classifier_dropout_prob
a = initializer_range
a = layer_norm_eps
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = version.parse("""1.11""" )
@property
def __UpperCAmelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def __UpperCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def __UpperCAmelCase ( self : Dict ) -> float:
return 1e-4
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : Dict = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 1 |
from typing import Any
import numpy as np
def __magic_name__ ( A : np.ndarray ):
'''simple docstring'''
return np.array_equal(A, matrix.conjugate().T )
def __magic_name__ ( A : np.ndarray, A : np.ndarray ):
'''simple docstring'''
a = v.conjugate().T
a = v_star.dot(A )
assert isinstance(A, np.ndarray )
return (v_star_dot.dot(A )) / (v_star.dot(A ))
def __magic_name__ ( ):
'''simple docstring'''
a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
a = np.array([[1], [2], [3]] )
assert is_hermitian(A ), F"""{a} is not hermitian."""
print(rayleigh_quotient(A, A ) )
a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(A ), F"""{a} is not hermitian."""
assert rayleigh_quotient(A, A ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 662 |
import math
import flax.linen as nn
import jax.numpy as jnp
def __magic_name__ ( A : jnp.ndarray, A : int, A : float = 1, A : float = 1, A : float = 1.0E4, A : bool = False, A : float = 1.0, ):
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
a = float(embedding_dim // 2 )
a = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
a = min_timescale * jnp.exp(jnp.arange(A, dtype=jnp.floataa ) * -log_timescale_increment )
a = jnp.expand_dims(A, 1 ) * jnp.expand_dims(A, 0 )
# scale embeddings
a = scale * emb
if flip_sin_to_cos:
a = jnp.concatenate([jnp.cos(A ), jnp.sin(A )], axis=1 )
else:
a = jnp.concatenate([jnp.sin(A ), jnp.cos(A )], axis=1 )
a = jnp.reshape(A, [jnp.shape(A )[0], embedding_dim] )
return signal
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCamelCase )
a = nn.silu(__lowerCamelCase )
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCamelCase )
return temb
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : float = 1
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]:
return get_sinusoidal_embeddings(
__lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 662 | 1 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : int = 16 , __lowerCamelCase : int = 88 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : int = 32 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : str = "geglu" , __lowerCamelCase : Optional[int] = None , ) -> Tuple:
super().__init__()
a = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__lowerCamelCase , attention_head_dim=__lowerCamelCase , in_channels=__lowerCamelCase , num_layers=__lowerCamelCase , dropout=__lowerCamelCase , norm_num_groups=__lowerCamelCase , cross_attention_dim=__lowerCamelCase , attention_bias=__lowerCamelCase , sample_size=__lowerCamelCase , num_vector_embeds=__lowerCamelCase , activation_fn=__lowerCamelCase , num_embeds_ada_norm=__lowerCamelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
a = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
a = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
a = [1, 0]
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : bool = True , ) -> int:
a = hidden_states
a = []
a = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
a = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
a = self.transformer_index_for_condition[i]
a = self.transformers[transformer_index](
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , timestep=__lowerCamelCase , cross_attention_kwargs=__lowerCamelCase , return_dict=__lowerCamelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
a = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
a = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__lowerCamelCase )
| 662 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 662 | 1 |
from typing import Any
class snake_case__ :
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : Any ) -> Optional[int]:
a = data
a = None
class snake_case__ :
"""simple docstring"""
def __init__( self : str ) -> Optional[int]:
a = None
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.head
while temp is not None:
print(temp.data , end=" " )
a = temp.next
print()
def __UpperCAmelCase ( self : str , __lowerCamelCase : Any ) -> Any:
a = Node(__lowerCamelCase )
a = self.head
a = new_node
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> Optional[Any]:
if node_data_a == node_data_a:
return
else:
a = self.head
while node_a is not None and node_a.data != node_data_a:
a = node_a.next
a = self.head
while node_a is not None and node_a.data != node_data_a:
a = node_a.next
if node_a is None or node_a is None:
return
a , a = node_a.data, node_a.data
if __name__ == "__main__":
__lowerCAmelCase : Dict = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 662 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0]
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
| 662 | 1 |