import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> pooled Bx2048xHxW -> flattened Bx2048xN -> transposed BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
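

# Usage sketch (added for illustration; not part of the original file). The pieces
# above compose as follows. The jsonl path and the BERT tokenizer are assumptions,
# not specifics from the source.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    labels = get_mmimdb_labels()
    # "train.jsonl" is a placeholder: records look like {"text": ..., "img": ..., "label": [...]}
    dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=512)
    loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
    text, mask, img, img_start, img_end, tgt = next(iter(loader))
    print(text.shape, img.shape, tgt.shape)  # (B, L), (B, 3, 224, 224), (B, 23)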
def solution(limit: int = 1000000) -> int:
    """Sieve Euler's totient over [2, limit] and return the sum of phi(n)."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            entity_name, _ = line.rstrip().split("\t")
            entity_vocab[entity_name] = index

    return entity_vocab
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
_snake_case = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
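
# Example invocation (added for illustration; the script name and every path below
# are placeholders, not specifics from the source):
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path ./luke/pytorch_model.bin \
#       --metadata_path ./luke/metadata.json \
#       --entity_vocab_path ./luke/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted-luke-base \
#       --model_size base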
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below n that are multiples of 3 or 5."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts MFSC features for one waveform vector (unbatched)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
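

# Usage sketch (added, illustrative; the module's relative imports mean it normally
# runs inside its package, so this is a sketch rather than a standalone script).
# One second of noise at 16 kHz yields roughly 100 frames of 80 log-mel features,
# given hop_length=10 ms and feature_size=80.
if __name__ == "__main__":
    extractor = MCTCTFeatureExtractor()
    speech = np.random.randn(16000).astype(np.float32)
    batch = extractor(speech, sampling_rate=16000, padding=True, return_tensors="np")
    print(batch["input_features"].shape)  # roughly (1, ~100, 80)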
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""


def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path, record_checksum=True):
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
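

# Usage sketch (added, illustrative): record a file's size and sha256 once, then
# verify a fresh copy against it, the way builders consume these dicts.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(b"hello")
    expected = {"data.bin": get_size_checksum_dict(tmp.name)}
    recorded = {"data.bin": get_size_checksum_dict(tmp.name)}
    verify_checksums(expected, recorded, verification_name="dataset source files")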
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class WithLiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(WithLiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)

        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
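

# Usage sketch (added, illustrative) of the API these tests exercise:
# HfArgumentParser builds an argparse CLI straight from the dataclass fields.
if __name__ == "__main__":
    parser = HfArgumentParser(BasicExample)
    (example,) = parser.parse_args_into_dataclasses(
        ["--foo", "1", "--bar", "0.5", "--baz", "quux"], look_for_args_file=False
    )
    print(example)  # BasicExample(foo=1, bar=0.5, baz='quux', flag=False)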
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase_ : Union[str, Any] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCamelCase_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 215 | 0 |
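
# Note (added): with this pattern, importing the package is cheap. The heavy
# sentencepiece/tokenizers dependencies are only imported when an attribute such as
# `LayoutXLMTokenizer` is first accessed on the `_LazyModule` that replaces the module.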
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
            ],
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
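

# Note (added): `fairseq_offset` used in the tests above is 1 for XLM-R; the slow
# tokenizer shifts raw SentencePiece ids so the leading vocabulary slots line up with
# fairseq's "<s>"/"<pad>"/"</s>"/"<unk>" layout, which is why the expected ids are
# offset before comparison.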
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting the
        # input nodes with the first hidden layer nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        """Fine-tunes the weights of the network via the chain rule."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Applies the sigmoid function element-wise."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, given the already-activated value."""
    return (value) * (1 - (value))


def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
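

# Note (added): the update terms in back_propagation implement the chain rule for the
# squared error (output - predicted)^2 through sigmoid activations. Since
# sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)), `sigmoid_derivative` is applied to the
# already-activated values, hence value * (1 - value).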
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , __UpperCamelCase , )
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self : Any , snake_case_ : Union[str, Any] ):
super().__init__(snake_case_ )
UpperCamelCase_: Optional[int] = config.num_labels
UpperCamelCase_: Tuple = BertModelWithPabee(snake_case_ )
UpperCamelCase_: List[str] = nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase_: Any = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(snake_case_ )
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int=None , snake_case_ : int=None , snake_case_ : Tuple=None , snake_case_ : Any=None , snake_case_ : Union[str, Any]=None , snake_case_ : Union[str, Any]=None , snake_case_ : Tuple=None , ):
UpperCamelCase_: Dict = self.bert(
input_ids=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , position_ids=snake_case_ , head_mask=snake_case_ , inputs_embeds=snake_case_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
UpperCamelCase_: int = (logits[-1],)
if labels is not None:
UpperCamelCase_: Any = None
UpperCamelCase_: Union[str, Any] = 0
for ix, logits_item in enumerate(snake_case_ ):
if self.num_labels == 1:
# We are doing regression
UpperCamelCase_: Dict = MSELoss()
UpperCamelCase_: Dict = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase_: Optional[Any] = CrossEntropyLoss()
UpperCamelCase_: int = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
UpperCamelCase_: Any = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
UpperCamelCase_: Tuple = (total_loss / total_weights,) + outputs
return outputs
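# --- Added illustration (not part of the original module) ---
# A minimal standalone sketch of PABEE's stopping rule, as implemented in the
# `else:` branch of the model forward above: inference halts once `patience`
# consecutive internal classifiers agree. `pabee_exit_layer` is a hypothetical
# helper; `per_layer_logits` stands for a non-empty list of
# (batch_size, num_labels) tensors, one per transformer layer, and `torch` is
# assumed to be imported as elsewhere in this file.
def pabee_exit_layer(per_layer_logits, patience):
    patient_counter, previous_prediction = 0, None
    exit_layer = 0
    for layer_idx, layer_logits in enumerate(per_layer_logits):
        exit_layer = layer_idx + 1
        prediction = layer_logits.argmax(dim=1)
        if previous_prediction is not None and torch.all(prediction.eq(previous_prediction)):
            patient_counter += 1  # this layer agrees with the previous one
        else:
            patient_counter = 0  # a disagreement resets the patience counter
        previous_prediction = prediction
        if patient_counter == patience:
            break  # early exit: enough consecutive layers agreed
    return exit_layer  # number of layers actually evaluated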
| 356 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowerCamelCase_ : Optional[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
__UpperCamelCase : Optional[datasets.Features] = None
__UpperCamelCase : str = "utf-8"
__UpperCamelCase : Optional[str] = None
__UpperCamelCase : Optional[str] = None
__UpperCamelCase : bool = True # deprecated
__UpperCamelCase : Optional[int] = None # deprecated
__UpperCamelCase : int = 10 << 20 # 10MB
__UpperCamelCase : Optional[bool] = None
class _UpperCamelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
__UpperCamelCase : Tuple = JsonConfig
def lowerCAmelCase__ ( self : int ):
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
UpperCamelCase_: List[str] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self : Dict , snake_case_ : str ):
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
UpperCamelCase_: Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case_ , (str, list, tuple) ):
UpperCamelCase_: List[Any] = data_files
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase_: str = [files]
UpperCamelCase_: Any = [dl_manager.iter_files(snake_case_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
UpperCamelCase_: Dict = []
for split_name, files in data_files.items():
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase_: Tuple = [files]
UpperCamelCase_: Optional[int] = [dl_manager.iter_files(snake_case_ ) for file in files]
splits.append(datasets.SplitGenerator(name=snake_case_ , gen_kwargs={"""files""": files} ) )
return splits
def lowerCAmelCase__ ( self : str , snake_case_ : pa.Table ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
UpperCamelCase_: Union[str, Any] = self.config.features.arrow_schema.field(snake_case_ ).type
UpperCamelCase_: Tuple = pa_table.append_column(snake_case_ , pa.array([None] * len(snake_case_ ) , type=snake_case_ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCamelCase_: int = table_cast(snake_case_ , self.config.features.arrow_schema )
return pa_table
def lowerCAmelCase__ ( self : Dict , snake_case_ : Optional[Any] ):
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case_ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(snake_case_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCamelCase_: Dict = json.load(snake_case_ )
# We keep only the field we are interested in
UpperCamelCase_: Optional[int] = dataset[self.config.field]
# We accept two formats: a list of dicts or a dict of lists
if isinstance(snake_case_ , (list, tuple) ):
UpperCamelCase_: Optional[int] = set().union(*[row.keys() for row in dataset] )
UpperCamelCase_: int = {col: [row.get(snake_case_ ) for row in dataset] for col in keys}
else:
UpperCamelCase_: Optional[int] = dataset
UpperCamelCase_: List[str] = pa.Table.from_pydict(snake_case_ )
yield file_idx, self._cast_table(snake_case_ )
# If the file has one json object per line
else:
with open(snake_case_ , """rb""" ) as f:
UpperCamelCase_: Optional[int] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCamelCase_: Optional[int] = max(self.config.chunksize // 32 , 16 << 10 )
UpperCamelCase_: Tuple = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
UpperCamelCase_: int = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(snake_case_ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCamelCase_: Tuple = batch.decode(self.config.encoding , errors=snake_case_ ).encode("""utf-8""" )
try:
while True:
try:
UpperCamelCase_: Tuple = paj.read_json(
io.BytesIO(snake_case_ ) , read_options=paj.ReadOptions(block_size=snake_case_ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(snake_case_ , pa.ArrowInvalid )
and "straddling" not in str(snake_case_ )
or block_size > len(snake_case_ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'''Batch of {len(snake_case_ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
snake_case_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCamelCase_: Optional[Any] = json.load(snake_case_ )
except json.JSONDecodeError:
logger.error(f'''Failed to read file \'{file}\' with error {type(snake_case_ )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(snake_case_ , snake_case_ ): # list is the only sequence type supported in JSON
try:
UpperCamelCase_: Any = set().union(*[row.keys() for row in dataset] )
UpperCamelCase_: List[str] = {col: [row.get(snake_case_ ) for row in dataset] for col in keys}
UpperCamelCase_: int = pa.Table.from_pydict(snake_case_ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(snake_case_ )}: {e}''' )
raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None
yield file_idx, self._cast_table(snake_case_ )
break
else:
logger.error(f'''Failed to read file \'{file}\' with error {type(snake_case_ )}: {e}''' )
raise ValueError(
f'''Not able to read records in the JSON file at {file}. '''
f'''You should probably indicate the field of the JSON file containing your records. '''
f'''This JSON file contains the following fields: {str(list(dataset.keys() ) )}. '''
f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(snake_case_ )
batch_idx += 1
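# --- Added illustration (not part of the original module) ---
# The retry loop above grows pyarrow's JSON block size whenever a JSON object
# "straddles" a block boundary. The same pattern in isolation (a simplified,
# hypothetical helper: it retries on any ArrowInvalid until the block covers
# the whole input, whereas the loader above also inspects the error message):
def _read_json_with_growing_block_size(raw_bytes: bytes, block_size: int = 16 << 10) -> pa.Table:
    while True:
        try:
            return paj.read_json(
                io.BytesIO(raw_bytes), read_options=paj.ReadOptions(block_size=block_size)
            )
        except pa.ArrowInvalid:
            if block_size > len(raw_bytes):
                raise  # growing the block further cannot help
            block_size *= 2  # double and retry, as the loader above does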
| 223 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None) -> None:
    '''simple docstring'''
    require_version(deps[pkg], hint)
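# Illustration (hedged): `deps` maps pinned package names to version
# specifiers, e.g. {"tqdm": "tqdm>=4.27", ...}; `require_version_core` raises
# at import time when the installed version does not satisfy the specifier.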
| 295 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class A ( A_ , unittest.TestCase ):
UpperCamelCase_ : Any =PriorTransformer
UpperCamelCase_ : List[str] ='''hidden_states'''
@property
def _A (self ):
__lowercase= 4
__lowercase= 8
__lowercase= 7
__lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _A (self , lowerCAmelCase=0 ):
torch.manual_seed(lowerCAmelCase )
__lowercase= 4
__lowercase= 8
__lowercase= 7
__lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _A (self ):
return (4, 8)
@property
def _A (self ):
return (4, 8)
def _A (self ):
__lowercase= {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
__lowercase= self.dummy_input
return init_dict, inputs_dict
def _A (self ):
__lowercase, __lowercase= PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(lowerCAmelCase )
__lowercase= model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _A (self ):
__lowercase, __lowercase= self.prepare_init_args_and_inputs_for_common()
__lowercase= self.model_class(**lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , lowerCAmelCase )
def _A (self ):
__lowercase= PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
__lowercase= model.to(lowerCAmelCase )
if hasattr(lowerCAmelCase , 'set_default_attn_processor' ):
model.set_default_attn_processor()
__lowercase= self.get_dummy_seed_input()
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )[0]
__lowercase= output[0, :5].flatten().cpu()
print(lowerCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__lowercase= torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) )
@slow
class A ( unittest.TestCase ):
def _A (self , lowerCAmelCase=1 , lowerCAmelCase=7_6_8 , lowerCAmelCase=7_7 , lowerCAmelCase=0 ):
torch.manual_seed(lowerCAmelCase )
__lowercase= batch_size
__lowercase= embedding_dim
__lowercase= num_embeddings
__lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(lowerCAmelCase )
__lowercase= self.get_dummy_seed_input(seed=lowerCAmelCase )
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )[0]
assert list(sample.shape ) == [1, 7_6_8]
__lowercase= sample[0, :8].flatten().cpu()
print(lowerCAmelCase )
__lowercase= torch.tensor(lowerCAmelCase )
assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
| 295 | 1 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
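# Worked example of the padding rule above: an 8-byte message is 64 bits;
# appending the "1" bit gives 65 bits, 383 "0" bits bring the length to
# 448 (mod 512), and the 64-bit original length (here 64) is appended in
# MD5's little-endian layout, yielding exactly one 512-bit block.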
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    aa = 0x6745_2301
    ba = 0xEFCD_AB89
    ca = 0x98BA_DCFE
    da = 0x1032_5476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
# Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
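    # Added sanity check (it assumes the reconstructed helper names above):
    # the digest must agree with the standard library.
    import hashlib

    assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")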
| 366 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = False, False, False
@dataclass
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = None
# Automatically constructed
__UpperCamelCase = "dict"
__UpperCamelCase = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
__UpperCamelCase = field(default="Audio" , init=UpperCAmelCase__ , repr=UpperCAmelCase__ )
def __call__( self ):
"""simple docstring"""
return self.pa_type
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(_a , _a ):
return {"bytes": None, "path": value}
elif isinstance(_a , _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowerCamelCase = BytesIO()
sf.write(_a , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# To convert raw "PCM" bytes to "WAV" bytes, the sampling rate must be known
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already have the PCM bytes, we don't need to read the file again (just use them!)
lowerCamelCase = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
lowerCamelCase = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767
lowerCamelCase = BytesIO(bytes() )
sf.write(_a , _a , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
lowerCamelCase , lowerCamelCase = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
lowerCamelCase = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
lowerCamelCase = token_per_repo_id or {}
lowerCamelCase = path.split("""::""" )[-1]
try:
lowerCamelCase = string_to_dict(_a , config.HUB_DATASETS_URL )["""repo_id"""]
lowerCamelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowerCamelCase = None
with xopen(_a , """rb""" , use_auth_token=_a ) as f:
lowerCamelCase , lowerCamelCase = sf.read(_a )
else:
lowerCamelCase , lowerCamelCase = sf.read(_a )
lowerCamelCase = array.T
if self.mono:
lowerCamelCase = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowerCamelCase = librosa.resample(_a , orig_sr=_a , target_sr=self.sampling_rate )
lowerCamelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowerCAmelCase ( self ):
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.binary() )
lowerCamelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.string() )
lowerCamelCase = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
lowerCamelCase = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowerCamelCase = storage.field("""bytes""" )
else:
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowerCamelCase = storage.field("""path""" )
else:
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.string() )
lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(_a , self.pa_type )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(_a ):
with xopen(_a , """rb""" ) as f:
lowerCamelCase = f.read()
return bytes_
lowerCamelCase = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCamelCase = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_a , self.pa_type )
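# --- Added illustration (not part of the original module) ---
# Typical round trip through this feature (a hedged sketch; it assumes
# `soundfile`/`librosa` are installed and that this class corresponds to
# datasets' `Audio` feature):
#   feat = Audio(sampling_rate=16_000)
#   enc = feat.encode_example({"array": np.zeros(16_000, dtype=np.float32),
#                              "sampling_rate": 16_000})  # -> {"bytes": ..., "path": None}
#   dec = feat.decode_example(enc)  # -> {"path": None, "array": ..., "sampling_rate": 16000}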
| 168 | 0 |
"""simple docstring"""
def solution(limit: int = 28_123) -> int:
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
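# Worked check: 12 is the smallest abundant number (1 + 2 + 3 + 4 + 6 = 16 > 12),
# so 24 = 12 + 12 is the smallest sum of two abundant numbers; the default
# limit of 28123 comes from the problem statement, which guarantees every
# integer above it can be written as such a sum.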
if __name__ == "__main__":
print(solution())
| 33 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=2 , _lowerCamelCase=24 , _lowerCamelCase=16 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=None , _lowerCamelCase=2 , _lowerCamelCase=2 , ):
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : Optional[int] = max_length
UpperCAmelCase__ : int = num_mel_bins
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : Any = num_attention_heads
UpperCAmelCase__ : int = intermediate_size
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Any = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : str = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : List[Any] = scope
UpperCAmelCase__ : str = frequency_stride
UpperCAmelCase__ : str = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase__ : str = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCAmelCase__ : Optional[Any] = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCAmelCase__ : Dict = frequency_out_dimension * time_out_dimension
UpperCAmelCase__ : Dict = num_patches + 2
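        # Worked example with the defaults above: frequency_out_dimension =
        # (16 - 2) // 2 + 1 = 8, time_out_dimension = (24 - 2) // 2 + 1 = 12,
        # so num_patches = 8 * 12 = 96 and seq_length = 98.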
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase__ : Dict = self.get_config()
return config, input_values, labels
def snake_case__ ( self):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Dict = ASTModel(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(_lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
return config, inputs_dict
@require_torch
class _snake_case ( a__ , a__ , unittest.TestCase ):
lowerCAmelCase :int = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase :List[str] = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
lowerCAmelCase :List[Any] = False
lowerCAmelCase :Any = False
lowerCAmelCase :Optional[int] = False
lowerCAmelCase :int = False
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = ASTModelTester(self)
UpperCAmelCase__ : List[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37)
def snake_case__ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""")
def snake_case__ ( self):
pass
def snake_case__ ( self):
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(_lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCAmelCase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear))
def snake_case__ ( self):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class(_lowerCamelCase)
UpperCAmelCase__ : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Optional[int] = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["""input_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase)
@slow
def snake_case__ ( self):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = ASTModel.from_pretrained(_lowerCamelCase)
self.assertIsNotNone(_lowerCamelCase)
def _UpperCamelCase ( ):
UpperCAmelCase__ : Dict = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" )
UpperCAmelCase__ , UpperCAmelCase__ : int = torchaudio.load(UpperCamelCase__ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class _snake_case ( unittest.TestCase ):
@cached_property
def snake_case__ ( self):
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""")
if is_torchaudio_available()
else None
)
@slow
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = self.default_feature_extractor
UpperCAmelCase__ : List[str] = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""").to(_lowerCamelCase)
UpperCAmelCase__ : str = self.default_feature_extractor
UpperCAmelCase__ , UpperCAmelCase__ : Dict = prepare_audio()
UpperCAmelCase__ : Dict = audio.squeeze().numpy()
UpperCAmelCase__ : Union[str, Any] = feature_extractor(_lowerCamelCase , sampling_rate=_lowerCamelCase , return_tensors="""pt""").to(_lowerCamelCase)
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Tuple = model(**_lowerCamelCase)
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 527))
self.assertEqual(outputs.logits.shape , _lowerCamelCase)
UpperCAmelCase__ : Tuple = torch.tensor([-0.8760, -7.0042, -8.6602]).to(_lowerCamelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4)) | 163 | 0 |
"""simple docstring"""
def _snake_case(separator: str, separated: list[str]) -> str:
    joined: str = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    # str.strip() removes a *set of characters*, not a trailing substring, so
    # drop the final separator by slicing instead
    return joined[: -len(separator)] if separator else joined
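# Example behaviour of the fixed implementation above (added illustration):
#   _snake_case("", ["a", "b", "c"])              -> "abc"
#   _snake_case(" ", ["You", "are", "amazing!"])  -> "You are amazing!"
#   _snake_case("a", ["ab", "c"])                 -> "abac" (str.strip("a") would wrongly give "bac")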
if __name__ == "__main__":
from doctest import testmod
testmod()
| 209 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
A__ : List[Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
A__ : Optional[int] = {
'facebook/blenderbot_small-90M': 512,
}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :Optional[int] = VOCAB_FILES_NAMES
_UpperCAmelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :Tuple = BlenderbotSmallTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : str=None , snake_case__ : Any="<|endoftext|>" , snake_case__ : Tuple="<|endoftext|>" , snake_case__ : Tuple="<|endoftext|>" , snake_case__ : str=False , snake_case__ : int=True , **snake_case__ : Tuple , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=snake_case__ , merges=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , ) , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , **snake_case__ , )
lowerCamelCase_ : Optional[int] =add_prefix_space
def UpperCAmelCase__ ( self : Tuple , snake_case__ : Optional[Any] , snake_case__ : List[str]=None ):
lowerCamelCase_ : Optional[Any] =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
lowerCamelCase_ : int =[self.sep_token_id]
lowerCamelCase_ : List[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 209 | 1 |
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """simple docstring"""
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """simple docstring"""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """simple docstring"""
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """simple docstring"""
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga
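# Minimal synthetic usage sketch (added illustration; it avoids OpenCV):
#   noisy = np.random.rand(32, 32).astype("float32")
#   smoothed = bilateral_filter(noisy, spatial_variance=1.0,
#                               intensity_variance=1.0, kernel_size=5)
#   # same shape as the input; the kernel_size // 2 wide border stays 0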
def parse_args(args: list) -> tuple:
    """simple docstring"""
    filename = args[1] if args[1:] else '../image_data/lena.jpg'
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow('input image', img)
    out = img / 255
    out = out.astype('float32')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow('output image', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 205 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = BlipImageProcessor()
lowerCAmelCase = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
lowerCAmelCase = BlipProcessor(lowerCAmelCase , lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __lowercase ( self : Optional[Any] , **lowerCAmelCase : Tuple ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ).tokenizer
def __lowercase ( self : List[Any] , **lowerCAmelCase : Optional[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ).image_processor
def __lowercase ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : str ):
lowerCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase = [Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self : List[str] ):
lowerCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase = self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 )
lowerCAmelCase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(lowerCAmelCase , return_tensors="""np""" )
lowerCAmelCase = processor(images=lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self : Tuple ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = processor(text=lowerCAmelCase )
lowerCAmelCase = tokenizer(lowerCAmelCase , return_token_type_ids=lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=lowerCAmelCase , images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
def __lowercase ( self : List[Any] ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.batch_decode(lowerCAmelCase )
lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BlipProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=lowerCAmelCase , images=lowerCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 155 | 0 |
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        '''simple docstring'''
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
return cipher, key
@staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        '''simple docstring'''
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
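    # Added round-trip check: decryption inverts encryption exactly, since
    # c = (p + k) * k implies p = (c - k**2) / k for every character.
    assert Onepad().decrypt(c, k) == "Hello"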
| 114 |
'''simple docstring'''
import os
def solution() -> int:
    """simple docstring"""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    triangle_filepath = os.path.join(script_directory, 'triangle.txt')
    with open(triangle_filepath) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' '):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_a = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a, number_b)
    return max(a[-1])
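# Worked example of the recurrence above: for the triangle
#   3 / 7 4 / 2 4 6 / 8 5 9 3
# each row adds the better of its two parents, giving a final row of
# 20, 19, 23, 16, so the maximum path sum is 23 (path 3 + 7 + 4 + 9).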
if __name__ == "__main__":
print(solution())
| 114 | 1 |
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class a_ ( lowerCamelCase ):
def __init__( self , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""bs4"""] )
super().__init__(**_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCamelCase = parent.find_all(child.name , recursive=_SCREAMING_SNAKE_CASE )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_SCREAMING_SNAKE_CASE ) else next(i for i, s in enumerate(_SCREAMING_SNAKE_CASE , 1 ) if s is child ) )
UpperCamelCase = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase = BeautifulSoup(_SCREAMING_SNAKE_CASE , """html.parser""" )
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
for element in html_code.descendants:
if type(_SCREAMING_SNAKE_CASE ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCamelCase = html.unescape(_SCREAMING_SNAKE_CASE ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase ,UpperCamelCase = self.xpath_soup(_SCREAMING_SNAKE_CASE )
stringaxtag_seq.append(_SCREAMING_SNAKE_CASE )
stringaxsubs_seq.append(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = """"""
for tagname, subs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
xpath += F"/{tagname}"
if subs != 0:
xpath += F"[{subs}]"
return xpath
def __call__( self , _SCREAMING_SNAKE_CASE ) -> BatchFeature:
"""simple docstring"""
UpperCamelCase = False
# Check that strings has a valid type
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = True
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
if len(_SCREAMING_SNAKE_CASE ) == 0 or isinstance(html_strings[0] , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = True
if not valid_strings:
raise ValueError(
"""HTML strings must of type `str`, `List[str]` (batch of examples), """
F"but is of type {type(_SCREAMING_SNAKE_CASE )}." )
UpperCamelCase = bool(isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(html_strings[0] , _SCREAMING_SNAKE_CASE )) )
if not is_batched:
UpperCamelCase = [html_strings]
# Get nodes + xpaths
UpperCamelCase = []
UpperCamelCase = []
for html_string in html_strings:
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = self.get_three_from_single(_SCREAMING_SNAKE_CASE )
nodes.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase = []
for node, tag_list, sub_list in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = self.construct_xpath(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
xpath_strings.append(_SCREAMING_SNAKE_CASE )
xpaths.append(_SCREAMING_SNAKE_CASE )
# return as Dict
UpperCamelCase = {"""nodes""": nodes, """xpaths""": xpaths}
UpperCamelCase = BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
return encoded_inputs
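# --- Added illustration (not part of the original module) ---
# Hedged sketch of the output (requires `bs4`): for the document
#   "<html><body><div>hello</div><div>world</div></body></html>"
# the extractor should yield nodes ["hello", "world"] with xpaths
# ["/html/body/div[1]", "/html/body/div[2]"]; tags that are unique among
# their siblings get subscript 0 and therefore no bracket.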
| 321 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 321 | 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_snake_case : Dict = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    smp_options = os.getenv('SM_HP_MP_PARAMETERS', '{}')
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS', '{}')
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get('sagemaker_mpi_enabled', False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed') is not None
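# Illustration of the environment this function inspects (hedged example values):
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'
# With both variables set as above and the `smdistributed` package importable,
# the function returns True.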
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
a_ = field(
default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
def lowercase ( self : List[str] ) -> Dict:
super().__post_init__()
warnings.warn(
'`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
'`TrainingArguments` instead.' , snake_case__ , )
@cached_property
def lowercase ( self : str ) -> int:
logger.info('PyTorch: setting up devices' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'torch.distributed process group is initialized, but local_rank == -1. '
'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch' )
if self.no_cuda:
__lowerCAmelCase = torch.device('cpu' )
__lowerCAmelCase = 0
elif is_sagemaker_model_parallel_available():
__lowerCAmelCase = smp.local_rank()
__lowerCAmelCase = torch.device('cuda' , snake_case__ )
__lowerCAmelCase = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) )
__lowerCAmelCase = torch.device('cuda' , self.local_rank )
__lowerCAmelCase = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__lowerCAmelCase = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__lowerCAmelCase = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = torch.device('cuda' , self.local_rank )
__lowerCAmelCase = 1
if device.type == "cuda":
torch.cuda.set_device(snake_case__ )
return device
@property
def lowercase ( self : List[Any] ) -> Optional[int]:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def lowercase ( self : List[Any] ) -> List[str]:
return not is_sagemaker_model_parallel_available()
@property
def lowercase ( self : Dict ) -> Optional[Any]:
return False
| 353 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = DanceDiffusionPipeline
a_ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
a_ = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
a_ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
a_ = False
a_ = False
def lowercase ( self : List[Any] ) -> Dict:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowerCAmelCase_ , use_timestep_embedding=lowerCAmelCase_ , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
__lowerCAmelCase = IPNDMScheduler()
__lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
}
return components
def lowercase ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=0 ) -> Any:
if str(lowerCAmelCase_ ).startswith('mps' ):
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
else:
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
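# Illustrative usage sketch mirroring the slow test above ("harmonai/maestro-150k" is
# the checkpoint the test already loads); the output audio is a numpy array of shape
# (channels, samples).
def _demo_dance_diffusion():
    import torch
    from diffusers import DanceDiffusionPipeline

    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    generator = torch.manual_seed(0)
    output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
    print(output.audios[0].shape)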
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer(self):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCAmelCase_ :Tuple = {"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
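# Illustrative round trip (not part of the test class) with the checkpoint exercised by
# the slow tests above; the expected ids come from test_tokenization_base_easy_symbols.
def _demo_bert_generation_tokenizer():
    from transformers import BertGenerationTokenizer

    tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    ids = tok.encode("Hello World!")
    print(ids)  # [18536, 2260, 101]
    print(tok.decode(ids))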
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def _snake_case ( self : int ) -> Optional[Any]:
'''simple docstring'''
A: List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
A: Any = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
A: Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
A: str = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3)
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Any = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def _snake_case ( self : Tuple ) -> Dict:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[str] = '''Hello how are you'''
A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
A: List[str] = tokenizer.decode(sample_ids[0] )
A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def _snake_case ( self : Any ) -> Optional[int]:
'''simple docstring'''
A: int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: List[Any] = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def _snake_case ( self : List[str] ) -> int:
'''simple docstring'''
A: Optional[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Optional[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Dict ) -> Any:
'''simple docstring'''
A: Optional[int] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
A: str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
A: Tuple = tokenizer.decode(sample_ids[0] )
A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def _snake_case ( self : int ) -> List[str]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Union[str, Any] = '''Hello how are you'''
A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Any:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Any = '''Hello how are you'''
A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A: List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = '''Hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids
A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
A: Any = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' )
def _snake_case ( self : str ) -> str:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: str = '''Hello how Are you'''
A: Union[str, Any] = '''hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
A: Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _snake_case ( self : Any ) -> Tuple:
'''simple docstring'''
A: str = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
A: Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
A: int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
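        # Worked illustration of the offsets asserted above: repeated frames merge into one
        # char spanning [first_frame, last_frame + 1); <pad> frames are dropped, which is why
        # the "ɾ" at frames 4-6 and the "ɾ" at frames 7-9 remain separate chars.
        #   frames: 0..1 -> "k", 1..4 -> "s", 4..6 -> "ɾ", 7..9 -> "ɾ", 9..10 -> "|",
        #           11..12 -> "ɾ", 12..15 -> "l", 15..16 -> "|", 16..17 -> "ɭʲ"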
def _snake_case ( self : Any ) -> List[Any]:
'''simple docstring'''
A: Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE_ ) )
# transform list to ModelOutput
A: Dict = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
[recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for la, la in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
A: int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ )
A: List[Any] = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) for ids in sample_ids]
check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def _snake_case ( self : int ) -> int:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def _snake_case ( self : str ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def _snake_case ( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
def _snake_case ( self : Tuple ) -> Any:
'''simple docstring'''
A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: str = tokenizer.vocab_size
A: str = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) )
A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
A: int = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) )
A: int = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _snake_case ( self : str ) -> Tuple:
'''simple docstring'''
A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
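# Illustrative sketch (requires the `phonemizer` package with an espeak backend, just
# like the tests above): text is phonemized first, then mapped to CTC token ids.
def _demo_phoneme_tokenizer():
    from transformers import Wav2Vec2PhonemeCTCTokenizer

    tok = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
    phonemes = tok.phonemize("Hello how are you", phonemizer_lang="en-us")
    print(phonemes)  # "h ə l oʊ h aʊ ɑːɹ j uː" per the tests above
    ids = tok(phonemes, do_phonemize=False).input_ids
    print(tok.decode(ids))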
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
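# Minimal sketch (hypothetical helper, not part of the original script) of how the
# (src, dest) pairs built above are consumed: each old key is popped from the state
# dict and its tensor re-inserted under the new name.
def _demo_apply_rename_keys(state_dict, rename_keys):
    for src, dest in rename_keys:
        state_dict[dest] = state_dict.pop(src)
    return state_dict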
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
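# Minimal numeric sketch of the q/k/v split performed above: a fused (3*h, h) projection
# matrix is cut into three (h, h) blocks, in query/key/value order.
def _demo_qkv_split(hidden_size=4):
    import torch

    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = qkv[:hidden_size, :]
    k = qkv[hidden_size : hidden_size * 2, :]
    v = qkv[-hidden_size:, :]
    assert torch.equal(torch.cat([q, k, v]), qkv)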
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
UpperCamelCase_ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model) | 357 |
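# Illustrative invocation of the conversion script above (the script filename and the
# output path are assumptions for the example):
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16 --base_model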
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
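# Illustrative check (not part of the original script) of the substitution table above:
# applying the patterns in order maps a TF-style key to its PyTorch counterpart.
def _demo_rename_key():
    assert rename_state_dict_key("encoder/memory_attention/output_proj/kernel") == "encoder.encoder_attn.out_proj.weight"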
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
a_ = tf.train.list_variables(UpperCAmelCase )
a_ = {}
a_ = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ):
a_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase )
a_ = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase_ = parser.parse_args()
if args.save_dir is None:
UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name
UpperCamelCase_ = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir) | 303 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
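# Illustrative sketch of the timm-vs-transformers comparison exercised above: the same
# backbone loaded both ways exposes the same channels, while the default out_indices
# differ ((-1,) for timm checkpoints vs [len(stage_names) - 1] for transformers ones).
def _demo_auto_backbone():
    from transformers import AutoBackbone

    timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True)
    hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18")
    print(timm_backbone.out_indices, hf_backbone.out_indices)
    print(timm_backbone.channels == hf_backbone.channels)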
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of "weight_norm", "time_group_norm", got {self.norm_type}'
            )
        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
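    # Worked example of the derived properties above, for the default 24 kHz settings:
    #   hop_length     = prod(upsampling_ratios) = 8 * 5 * 4 * 2 = 320
    #   frame_rate     = ceil(24_000 / 320) = 75 frames per second
    #   num_quantizers = int(1_000 * 24.0 // (75 * 10)) = 32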
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )
    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )
            new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))
def _a ( self : Optional[Any] ):
"""simple docstring"""
        A_ : int = ViTImageProcessor.from_pretrained(snake_case__ )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
        A_ : Any = CustomImageProcessor.from_pretrained(snake_case__ )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
A_ : str = AutoImageProcessor.from_pretrained(
f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 4 | 0 |
"""simple docstring"""
class _UpperCAmelCase :
    def __init__( self , set_counts : list ):
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )

    def merge( self , src : int , dst : int ):
        # Union by rank; returns False if src and dst are already in the same set
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True

    def get_parent( self , disj_set : int ):
        # Find the representative of disj_set, compressing the path as we go
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
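# A minimal usage sketch (not part of the original module): three disjoint
# sets of sizes 1, 2 and 3; merging the first two leaves the set of size 3
# as the largest.
if __name__ == "__main__":
    disjoint_set = _UpperCAmelCase([1, 2, 3] )
    assert disjoint_set.merge(0 , 1 )
    assert disjoint_set.get_parent(0 ) == disjoint_set.get_parent(1 )
    assert disjoint_set.max_set == 3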
| 292 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ):
pass
def A__ ( image ):
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ):
A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ):
A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase )
import datasets
A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
A = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , __UpperCamelCase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase ( self :Optional[Any] ):
pass
@slow
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
A = "Intel/dpt-large"
A = pipeline("depth-estimation" , model=__UpperCamelCase )
A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
A = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 292 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : int = {'''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Optional[Any] = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class lowerCamelCase (_snake_case ):
"""simple docstring"""
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = ["input_ids", "attention_mask"]
UpperCAmelCase_ = None
def __init__( self : int, _UpperCAmelCase : Optional[Any]=None, _UpperCAmelCase : Union[str, Any]=None, _UpperCAmelCase : Dict=None, _UpperCAmelCase : Any="<unk>", _UpperCAmelCase : Dict="<s>", _UpperCAmelCase : Optional[Any]="</s>", _UpperCAmelCase : List[str]="<pad>", _UpperCAmelCase : Tuple=False, _UpperCAmelCase : Tuple=False, **_UpperCAmelCase : List[Any], ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
UpperCamelCase__, UpperCamelCase__, tokenizer_file=UpperCamelCase__, unk_token=UpperCamelCase__, bos_token=UpperCamelCase__, eos_token=UpperCamelCase__, pad_token=UpperCamelCase__, add_prefix_space=UpperCamelCase__, clean_up_tokenization_spaces=UpperCamelCase__, **UpperCamelCase__, )
SCREAMING_SNAKE_CASE__ : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space", UpperCamelCase__ ) != add_prefix_space:
            SCREAMING_SNAKE_CASE__ : str = getattr(pre_tokenizers, pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE__ : Any = pre_tok_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = add_prefix_space
def A_ ( self : Optional[int], *_UpperCAmelCase : Any, **_UpperCAmelCase : str ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.get("is_split_into_words", UpperCamelCase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
" pretokenized inputs." )
return super()._batch_encode_plus(*UpperCamelCase__, **UpperCamelCase__ )
def A_ ( self : Any, *_UpperCAmelCase : Union[str, Any], **_UpperCAmelCase : Tuple ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.get("is_split_into_words", UpperCamelCase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
" pretokenized inputs." )
return super()._encode_plus(*UpperCamelCase__, **UpperCamelCase__ )
def A_ ( self : Optional[Any], _UpperCAmelCase : Tuple, _UpperCAmelCase : Tuple = None ) -> Tuple[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self._tokenizer.model.save(UpperCamelCase__, name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def A_ ( self : Dict, _UpperCAmelCase : int ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE__ : Any = input_ids[-self.model_max_length :]
return input_ids
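# A minimal usage sketch (assuming Hub access; not part of the original
# module -- the class above corresponds to transformers' BloomTokenizerFast):
#
#   tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   tok("Hello world")["input_ids"]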
| 363 |
cache : dict[tuple[int, int, int], int] = {}


def _calculate( days : int , absent : int , late : int ) -> int:
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution( days : int = 30 ) -> int:
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
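    # Sanity check (assuming the rules above mirror Project Euler 191's
    # stated example): a 4-day period admits 3**4 == 81 attendance strings,
    # of which 43 earn a prize.
    assert solution(4) == 43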
| 191 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_UpperCamelCase = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_UpperCamelCase = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
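# Illustration (not part of the original module): every raw byte gets a
# printable unicode stand-in; e.g. the space byte 32 maps to "Ġ" (U+0120),
# which is why GPT-2-style vocabularies show "Ġ" where a leading space was.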
def get_pairs( word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class _A ( __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : int = ["input_ids", "attention_mask"]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="replace" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Dict = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
__UpperCAmelCase : Optional[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
__UpperCAmelCase : List[str] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
__UpperCAmelCase : str = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
__UpperCAmelCase : Optional[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : List[str] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
with open(__UpperCAmelCase , encoding="""utf-8""" ) as vocab_handle:
__UpperCAmelCase : Union[str, Any] = json.load(__UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : List[str] = errors # how to handle errors in decoding
__UpperCAmelCase : str = bytes_to_unicode()
__UpperCAmelCase : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCAmelCase , encoding="""utf-8""" ) as merges_handle:
__UpperCAmelCase : Dict = merges_handle.read().split("""\n""" )[1:-1]
__UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : Optional[Any] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : Union[str, Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __A ( self ) -> str:
'''simple docstring'''
return len(self.encoder )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : int = tuple(__UpperCAmelCase )
__UpperCAmelCase : List[str] = get_pairs(__UpperCAmelCase )
if not pairs:
return token
while True:
__UpperCAmelCase : Optional[int] = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : str = bigram
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : List[str] = 0
while i < len(__UpperCAmelCase ):
try:
__UpperCAmelCase : Any = word.index(__UpperCAmelCase , __UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Optional[Any] = j
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : List[str] = tuple(__UpperCAmelCase )
__UpperCAmelCase : str = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
__UpperCAmelCase : List[Any] = get_pairs(__UpperCAmelCase )
__UpperCAmelCase : str = """ """.join(__UpperCAmelCase )
__UpperCAmelCase : str = word
return word
def __A ( self , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Tuple = []
for token in re.findall(self.pat , __UpperCAmelCase ):
__UpperCAmelCase : Tuple = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCAmelCase ).split(""" """ ) )
return bpe_tokens
def __A ( self , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def __A ( self , __UpperCAmelCase ) -> Any:
'''simple docstring'''
return self.decoder.get(__UpperCAmelCase )
def __A ( self , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = """""".join(__UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCAmelCase : List[str] = os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : Dict = os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + """\n""" )
__UpperCAmelCase : int = 0
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
__UpperCAmelCase : Dict = token_index
writer.write(""" """.join(__UpperCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = [self.sep_token_id]
__UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self , __UpperCAmelCase , __UpperCAmelCase=False , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCAmelCase ) > 0 and not text[0].isspace()):
__UpperCAmelCase : str = """ """ + text
return (text, kwargs)
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[Any]:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def __A ( self , __UpperCAmelCase ) -> List[int]:
'''simple docstring'''
__UpperCAmelCase : int = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCAmelCase )
__UpperCAmelCase : Tuple = """ """.join(__UpperCAmelCase )
__UpperCAmelCase : Any = self.encode(__UpperCAmelCase )
if len(__UpperCAmelCase ) > self.model_max_length:
__UpperCAmelCase : Any = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
| 254 |
'''simple docstring'''
def lowercase_ ( lowerCAmelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase : list[list[int]] = [[0 for _ in range(lowerCAmelCase__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__UpperCAmelCase : str = 1
for n in range(m + 1 ):
for k in range(1 , lowerCAmelCase__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_UpperCamelCase = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
_UpperCamelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 254 | 1 |
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
    if not isinstance(SCREAMING_SNAKE_CASE__ , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        num = str(abs(SCREAMING_SNAKE_CASE__ ) )
        num_transpositions = [list(num ) for char in num]
        for index in range(len(num ) ):
            num_transpositions[index].pop(index )
    return max(
        int("""""".join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
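    # A quick worked example (not part of the original module): for 259 the
    # candidate numbers after dropping one digit at each position are
    # 59, 29 and 25, so _snake_case(259) returns 59.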
| 360 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : List[Any] = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config ( PretrainedConfig ):
    model_type = "mobilenet_v1"

    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""" )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict([("""pixel_values""", {0: """batch"""})] )

    @property
    def outputs( self ):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})] )
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )

    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1e-4
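# A minimal usage sketch (values are hypothetical; not part of the original
# module):
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   config.hidden_act                                   # -> "relu6"
#   MobileNetV1OnnxConfig(config).atol_for_validation   # -> 1e-4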
| 285 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( _A , unittest.TestCase ):
'''simple docstring'''
a__ = KandinskyVaaPipeline
a__ = [
"""image_embeds""",
"""negative_image_embeds""",
]
a__ = ["""image_embeds""", """negative_image_embeds"""]
a__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a__ = False
@property
def _lowercase ( self : Tuple ) -> str:
"""simple docstring"""
return 32
@property
def _lowercase ( self : int ) -> Tuple:
"""simple docstring"""
return 32
@property
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim
@property
def _lowercase ( self : Tuple ) -> Dict:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowercase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return 100
@property
def _lowercase ( self : Any ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__magic_name__ = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def _lowercase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
__magic_name__ = self.dummy_unet
__magic_name__ = self.dummy_movq
__magic_name__ = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCamelCase__ , )
__magic_name__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any=0 ) -> List[str]:
"""simple docstring"""
__magic_name__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
__magic_name__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
__magic_name__ = torch.manual_seed(UpperCamelCase__ )
else:
__magic_name__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
__magic_name__ = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _lowercase ( self : Optional[int] ) -> Any:
"""simple docstring"""
__magic_name__ = """cpu"""
__magic_name__ = self.get_dummy_components()
__magic_name__ = self.pipeline_class(**UpperCamelCase__ )
__magic_name__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
__magic_name__ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
__magic_name__ = output.images
__magic_name__ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
__magic_name__ = image[0, -3:, -3:, -1]
__magic_name__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ = np.array(
[0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Tuple ) -> Any:
"""simple docstring"""
__magic_name__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
__magic_name__ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
__magic_name__ = KandinskyVaaPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
__magic_name__ = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
__magic_name__ = """red cat, 4k photo"""
__magic_name__ = torch.Generator(device="""cuda""" ).manual_seed(0 )
__magic_name__ , __magic_name__ = pipe_prior(
UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__magic_name__ = torch.Generator(device="""cuda""" ).manual_seed(0 )
__magic_name__ = pipeline(
image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , output_type="""np""" , )
__magic_name__ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 88 |
'''simple docstring'''
import copy
import os
import cv2 as cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch :
    def __init__( self ):
        """simple docstring"""
        self.img = ''''''
        self.original_image = ''''''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch( self , input_file ):
        """simple docstring"""
        self.img = cva.imread(input_file , 0 )
        self.original_image = copy.deepcopy(self.img )
        x , _ , _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            self.rem = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(self.rem )
        self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite('''output_data/output.jpg''', self.img )

    def plot_histogram( self ):
        """simple docstring"""
        plt.hist(self.img.ravel() , 256 , [0, 256] )

    def show_image( self ):
        """simple docstring"""
        cva.imshow('''Output-Image''', self.img )
        cva.imshow('''Input-Image''', self.original_image )
        cva.waitKey(5_000 )
        cva.destroyAllWindows()
if __name__ == "__main__":
a_ : str = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
a_ : Optional[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 75 | 0 |
from __future__ import annotations
lowerCamelCase_ = 1.6_021E-19 # units = C
def __magic_name__ ( conductivity : float , electron_conc : float , mobility : float , ):
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
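    # Worked example (hypothetical numbers, not part of the original module):
    # with conductivity = 25.0 S/m and electron_conc = 1e19 / m^3, the
    # function solves sigma = n * e * mu for the missing mobility.
    print(__magic_name__(conductivity=25.0, electron_conc=1e19, mobility=0))
    # -> ('mobility', ~15.6)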
| 178 |
import argparse
from collections import defaultdict
import yaml
lowerCamelCase_ = '''docs/source/en/_toctree.yml'''
def clean_model_doc_toc( model_doc ):
    '''simple docstring'''
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
                """others.""" )
        # Only add this once
        new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
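# A quick illustration (toy data, not part of the original script):
#
#   clean_model_doc_toc([
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/gpt2", "title": "GPT2"},
#   ])
#   -> [{"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/gpt2", "title": "GPT2"}]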
def check_model_doc( overwrite=False ):
    '''simple docstring'''
    with open(lowerCamelCase_ , encoding="""utf-8""" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["""sections"""]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["""sections"""]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if """sections""" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["""sections"""]
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["""sections"""] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["""sections"""] = model_doc
            content[api_idx]["""sections"""] = api_doc
            with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCamelCase_ = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 178 | 1 |
def matching_min_vertex_cover( graph : dict ) -> set:
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices


def get_edges( graph : dict ) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}") | 6 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
UpperCamelCase__ : List[Any] = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field( input_text , convert_value=None , default=None , error_message=None ):
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )


def _ask_options( input_text , options=[] , convert_value=None , default_choice=0 ):
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result


def _convert_compute_environment( value ):
    value = int(value )
    return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )


def _convert_distributed_mode( value ):
    value = int(value )
    return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )


def _convert_dynamo_backend( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value


def _convert_mixed_precision( value ):
    value = int(value )
    return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )


def _convert_sagemaker_distributed_mode( value ):
    value = int(value )
    return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )


def _convert_yes_no_to_bool( value ):
    return {"yes": True, "no": False}[value.lower()]
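# A minimal usage sketch (interactive; the prompt text is hypothetical and
# not part of the original module):
#
#   mixed_precision = _ask_options(
#       "Do you wish to use mixed precision?",
#       ["no", "fp16", "bf16", "fp8"],
#       _convert_mixed_precision,
#   )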
class _UpperCamelCase ( argparse.RawDescriptionHelpFormatter ):
    '''simple docstring'''

    def _format_usage( self , usage , actions , groups , prefix ):
        """simple docstring"""
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace("""<command> [<args>] """ , """""" )
return usage | 112 | 0 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq( number : int ) -> bool:
    sq = int(number**0.5 )
    return number == sq * sq


def add_three( x_num : int , x_den : int , y_num : int , y_den : int , z_num : int , z_den : int ) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution( order : int = 35 ) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
| 358 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1_280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    all_img_list , all_annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(all_annos ) ) , 4 )
        new_image , new_annos , path = update_image_and_anno(
            all_img_list , all_annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
        cva.imwrite(f'{file_root}.jpg' , new_image , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'{anno[0]} {x_center} {y_center} {width} {height}'
            annos_list.append(obj )
        with open(f'{file_root}.txt' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def get_dataset( label_dir , img_dir ) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , """*.txt""" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f'{label_name}.jpg' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("""\n""" ).split(""" """ )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno( all_img_list , all_annos , idxs , output_size , scale_range , filter_scale = 0.0 , ) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cva.imread(path )
        if i == 0:  # top-left
            img = cva.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cva.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cva.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cva.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars( number_char ) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 347 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: Tuple = LDMTextToImagePipeline
lowerCamelCase__: Optional[int] = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
lowerCamelCase__: Tuple = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
lowerCamelCase__: Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__: str = False
def _lowerCamelCase ( self: Union[str, Any] ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
__UpperCAmelCase : List[str] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase : Any = CLIPTextModel(__lowerCamelCase )
__UpperCAmelCase : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCAmelCase : Tuple = {
"unet": unet,
"scheduler": scheduler,
"vqvae": vae,
"bert": text_encoder,
"tokenizer": tokenizer,
}
return components
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=0 ) -> Any:
if str(__lowerCamelCase ).startswith("mps" ):
__UpperCAmelCase : int = torch.manual_seed(__lowerCamelCase )
else:
__UpperCAmelCase : Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _lowerCamelCase ( self: int ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Optional[Any] = self.get_dummy_components()
__UpperCAmelCase : Optional[int] = LDMTextToImagePipeline(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__UpperCAmelCase : str = self.get_dummy_inputs(__lowerCamelCase )
__UpperCAmelCase : Tuple = pipe(**__lowerCamelCase ).images
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
__UpperCAmelCase : Dict = np.array([0.61_01, 0.61_56, 0.56_22, 0.48_95, 0.66_61, 0.38_04, 0.57_48, 0.61_36, 0.50_14] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any]=torch.floataa , __lowerCamelCase: Dict=0 ) -> List[str]:
__UpperCAmelCase : Tuple = torch.manual_seed(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 32, 32) )
__UpperCAmelCase : Any = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
__UpperCAmelCase : int = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _lowerCamelCase ( self: Dict ) -> Tuple:
__UpperCAmelCase : Any = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__UpperCAmelCase : List[str] = self.get_inputs(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = pipe(**__lowerCamelCase ).images
__UpperCAmelCase : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 2_56, 2_56, 3)
__UpperCAmelCase : int = np.array([0.5_18_25, 0.5_28_50, 0.5_25_43, 0.5_42_58, 0.5_23_04, 0.5_25_69, 0.5_43_63, 0.5_52_76, 0.5_68_78] )
__UpperCAmelCase : List[Any] = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: int ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self: str , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict=torch.floataa , __lowerCamelCase: Optional[int]=0 ) -> Any:
__UpperCAmelCase : Union[str, Any] = torch.manual_seed(__lowerCamelCase )
__UpperCAmelCase : List[Any] = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 32, 32) )
__UpperCAmelCase : Optional[int] = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
__UpperCAmelCase : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
__UpperCAmelCase : Dict = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__UpperCAmelCase : List[str] = self.get_inputs(__lowerCamelCase )
__UpperCAmelCase : str = pipe(**__lowerCamelCase ).images[0]
__UpperCAmelCase : Dict = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" )
__UpperCAmelCase : str = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 157 | import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class _snake_case ( _lowercase ):
lowerCamelCase__: Tuple = ["input_features"]
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any]=80 , __lowerCamelCase: Optional[Any]=1_60_00 , __lowerCamelCase: Any=1_60 , __lowerCamelCase: Optional[int]=30 , __lowerCamelCase: List[str]=4_00 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Union[str, Any]=False , **__lowerCamelCase: Dict , ) -> Any:
super().__init__(
feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : int = n_fft
__UpperCAmelCase : List[str] = hop_length
__UpperCAmelCase : Optional[Any] = chunk_length
__UpperCAmelCase : Union[str, Any] = chunk_length * sampling_rate
__UpperCAmelCase : Any = self.n_samples // hop_length
__UpperCAmelCase : Tuple = sampling_rate
__UpperCAmelCase : List[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCamelCase , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=__lowerCamelCase , norm="slaney" , mel_scale="slaney" , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: np.array ) -> np.ndarray:
__UpperCAmelCase : List[Any] = spectrogram(
__lowerCamelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
__UpperCAmelCase : Union[str, Any] = log_spec[:, :-1]
__UpperCAmelCase : List[Any] = np.maximum(__lowerCamelCase , log_spec.max() - 8.0 )
__UpperCAmelCase : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowerCamelCase ( __lowerCamelCase: List[np.ndarray] , __lowerCamelCase: List[np.ndarray] , __lowerCamelCase: float = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
__UpperCAmelCase : Tuple = np.array(__lowerCamelCase , np.intaa )
__UpperCAmelCase : Dict = []
for vector, length in zip(__lowerCamelCase , attention_mask.sum(-1 ) ):
__UpperCAmelCase : Union[str, Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__UpperCAmelCase : Dict = padding_value
normed_input_values.append(__lowerCamelCase )
else:
__UpperCAmelCase : Optional[int] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self: Dict , __lowerCamelCase: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[Union[str, TensorType]] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[str] = "max_length" , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , **__lowerCamelCase: Dict , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
__UpperCAmelCase : List[Any] = isinstance(__lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__UpperCAmelCase : Optional[int] = is_batched_numpy or (
isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__UpperCAmelCase : Any = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ):
__UpperCAmelCase : str = np.asarray(__lowerCamelCase , dtype=np.floataa )
elif isinstance(__lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__UpperCAmelCase : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__UpperCAmelCase : Optional[Any] = [np.asarray([raw_speech] ).T]
__UpperCAmelCase : List[Any] = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
__UpperCAmelCase : List[str] = self.pad(
__lowerCamelCase , padding=__lowerCamelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
__UpperCAmelCase : List[Any] = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
__UpperCAmelCase : str = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
__UpperCAmelCase : Any = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
__UpperCAmelCase : Dict = [self._np_extract_fbank_features(__lowerCamelCase ) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCamelCase ):
__UpperCAmelCase : str = [np.asarray(__lowerCamelCase , dtype=np.floataa ) for feature in input_features]
else:
__UpperCAmelCase : List[str] = input_features
if return_attention_mask:
            # rescale the attention mask from samples (480000 = 30 s × 16 kHz) to features (3000 frames)
__UpperCAmelCase : int = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
__UpperCAmelCase : List[str] = padded_inputs.convert_to_tensors(__lowerCamelCase )
return padded_inputs
def _lowerCamelCase ( self: str ) -> Dict[str, Any]:
__UpperCAmelCase : Tuple = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
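# Hedged usage sketch for the extractor above, which mirrors transformers'
# WhisperFeatureExtractor (80 mel bins, 16 kHz sampling, 30 s chunks); the
# checkpoint name below is an assumption, not taken from this file.
def _log_mel_extraction_sketch():
    import numpy as np
    from transformers import WhisperFeatureExtractor
    extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-tiny")
    waveform = np.zeros(16_000, dtype=np.float32)  # 1 s of silence
    features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
    return features["input_features"].shape  # (1, 80, 3000): log-mel frames for a padded 30 s chunk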
| 157 | 1 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class a ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self : Dict ):
UpperCAmelCase_ = torch.nn.Linear(10 , 10 )
UpperCAmelCase_ = torch.optim.SGD(model.parameters() , 0.1 )
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
try:
pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE_ ) )
except Exception as e:
self.fail(F'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 355 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_lowerCamelCase = logging.get_logger(__name__)
class a ( _A ):
'''simple docstring'''
lowerCAmelCase : str = ['input_values', 'padding_mask']
def __init__( self : Optional[Any] , __snake_case : int = 1 , __snake_case : int = 2_40_00 , __snake_case : float = 0.0 , __snake_case : float = None , __snake_case : float = None , **__snake_case : Dict , ):
super().__init__(feature_size=__snake_case , sampling_rate=__snake_case , padding_value=__snake_case , **__snake_case )
UpperCAmelCase_ = chunk_length_s
UpperCAmelCase_ = overlap
@property
def lowerCamelCase_ ( self : List[str] ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowerCamelCase_ ( self : List[str] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : List[str] , __snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __snake_case : Optional[Union[bool, str, PaddingStrategy]] = None , __snake_case : Optional[bool] = False , __snake_case : Optional[int] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[int] = None , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
UpperCAmelCase_ = True
UpperCAmelCase_ = bool(
isinstance(__snake_case , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
UpperCAmelCase_ = [np.asarray(__snake_case , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__snake_case , np.ndarray ):
UpperCAmelCase_ = np.asarray(__snake_case , dtype=np.floataa )
elif isinstance(__snake_case , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
UpperCAmelCase_ = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase_ = [np.asarray(__snake_case ).T]
# verify inputs are valid
for idx, example in enumerate(__snake_case ):
if example.ndim > 2:
raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' )
UpperCAmelCase_ = None
UpperCAmelCase_ = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
UpperCAmelCase_ = min(array.shape[0] for array in raw_audio )
UpperCAmelCase_ = int(np.floor(max_length / self.chunk_stride ) )
UpperCAmelCase_ = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
UpperCAmelCase_ = max(array.shape[0] for array in raw_audio )
UpperCAmelCase_ = int(np.ceil(max_length / self.chunk_stride ) )
UpperCAmelCase_ = (nb_step - 1) * self.chunk_stride + self.chunk_length
UpperCAmelCase_ = '''max_length'''
else:
UpperCAmelCase_ = input_values
# normal padding on batch
if padded_inputs is None:
UpperCAmelCase_ = self.pad(
__snake_case , max_length=__snake_case , truncation=__snake_case , padding=__snake_case , return_attention_mask=__snake_case , )
if padding:
UpperCAmelCase_ = padded_inputs.pop('''attention_mask''' )
UpperCAmelCase_ = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
UpperCAmelCase_ = example[..., None]
input_values.append(example.T )
UpperCAmelCase_ = input_values
if return_tensors is not None:
UpperCAmelCase_ = padded_inputs.convert_to_tensors(__snake_case )
return padded_inputs
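# Hedged usage sketch: the extractor above matches transformers'
# EncodecFeatureExtractor ("input_values"/"padding_mask", mono or stereo audio);
# the checkpoint name is an assumption, not taken from this file.
def _encodec_extraction_sketch():
    import numpy as np
    from transformers import EncodecFeatureExtractor
    extractor = EncodecFeatureExtractor.from_pretrained("facebook/encodec_24khz")
    audio = np.zeros(24_000, dtype=np.float32)  # 1 s of mono silence
    inputs = extractor(audio, sampling_rate=24_000, return_tensors="np")
    return inputs["input_values"].shape  # (1, 1, 24000) for mono input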
| 177 | 0 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def a ( *_lowercase : List[Any] , **_lowercase : Any ):
pass
def lowercase__ ( snake_case_ :List[Any] ):
__UpperCAmelCase = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
a__ : Tuple = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def a ( self : str , _lowercase : str , _lowercase : Union[str, Any] , _lowercase : Dict ):
__UpperCAmelCase = DepthEstimationPipeline(model=UpperCamelCase__ , image_processor=UpperCamelCase__ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def a ( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : str ):
__UpperCAmelCase = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , UpperCamelCase__ )
import datasets
__UpperCAmelCase = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
__UpperCAmelCase = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , UpperCamelCase__ , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def a ( self : Union[str, Any] ):
pass
@slow
@require_torch
def a ( self : str ):
__UpperCAmelCase = '''Intel/dpt-large'''
__UpperCAmelCase = pipeline('''depth-estimation''' , model=UpperCamelCase__ )
__UpperCAmelCase = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
__UpperCAmelCase = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def a ( self : Optional[Any] ):
        self.skipTest('''There is no hf-internal-testing tiny model for either GLPN or DPT''' )
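# Hedged usage sketch mirroring the slow test above; the model name and image
# URL are taken from the test itself, the wrapper function is illustrative.
def _depth_estimation_sketch():
    from transformers import pipeline
    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    return outputs["depth"], outputs["predicted_depth"]  # PIL depth map + raw torch.Tensor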
| 332 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def __UpperCamelCase ( _A = 3 ):
if isinstance(_A , _A ):
        raise TypeError('''number of qubits must be an integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(_A ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate (>10).''' )
lowerCAmelCase_ = QuantumRegister(_A , '''qr''' )
lowerCAmelCase_ = ClassicalRegister(_A , '''cr''' )
lowerCAmelCase_ = QuantumCircuit(_A , _A )
lowerCAmelCase_ = number_of_qubits
for i in range(_A ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_A ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _A , _A )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_A , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_A , _A )
# simulate with 10000 shots
lowerCAmelCase_ = Aer.get_backend('''qasm_simulator''' )
lowerCAmelCase_ = execute(_A , _A , shots=10000 )
return job.result().get_counts(_A )
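# For the all-zero input state prepared here, the QFT produces a uniform
# superposition, so the measured counts should be spread roughly evenly over
# all 2**number_of_qubits basis states (about 1250 per state for 3 qubits
# and 10000 shots).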
if __name__ == "__main__":
print(
f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
| 278 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
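# Design note: in the original transformers module this _LazyModule instance is
# assigned into sys.modules[__name__] (the name ``__A`` is the obfuscation), so
# importing e.g. XCLIPModel defers the heavy torch import to first attribute
# access.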
| 254 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
lowerCAmelCase__ :int = credit_card_number
lowerCAmelCase__ :Tuple = 0
lowerCAmelCase__ :int = len(_SCREAMING_SNAKE_CASE ) - 2
for i in range(_SCREAMING_SNAKE_CASE , -1 , -2 ):
# double the value of every second digit
lowerCAmelCase__ :Optional[Any] = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
lowerCAmelCase__ :str = cc_number[:i] + str(_SCREAMING_SNAKE_CASE ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = F"{credit_card_number} is an invalid credit card number because"
if not credit_card_number.isdigit():
print(F"{error_message} it has nonnumerical characters." )
return False
if not 13 <= len(_SCREAMING_SNAKE_CASE ) <= 16:
print(F"{error_message} of its length." )
return False
if not validate_initial_digits(_SCREAMING_SNAKE_CASE ):
print(F"{error_message} of its first two digits." )
return False
if not luhn_validation(_SCREAMING_SNAKE_CASE ):
print(F"{error_message} it fails the Luhn check." )
return False
print(F"{credit_card_number} is a valid credit card number." )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 254 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
# fmt: off
__SCREAMING_SNAKE_CASE : Tuple = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__SCREAMING_SNAKE_CASE : List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__SCREAMING_SNAKE_CASE : List[str] = {'''unk_token''': '''<unk>'''}
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , lowerCAmelCase__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] , **lowerCAmelCase__ :Dict ) -> Dict:
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , **lowerCAmelCase__ :Any ) -> str:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] , **lowerCAmelCase__ :int ) -> Optional[int]:
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> str:
shutil.rmtree(self.tmpdirname )
def __magic_name__( self :Optional[int] ) -> Any:
__SCREAMING_SNAKE_CASE : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__SCREAMING_SNAKE_CASE : int = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : str = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : List[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__SCREAMING_SNAKE_CASE : int = self.get_image_processor(do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Dict = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCAmelCase__ , return_tensors='''np''' )
__SCREAMING_SNAKE_CASE : Any = processor(images=lowerCAmelCase__ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __magic_name__( self :Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Dict = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = '''lower newer'''
__SCREAMING_SNAKE_CASE : Dict = processor(text=lowerCAmelCase__ , return_tensors='''np''' )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowerCAmelCase__ , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def __magic_name__( self :str ) -> str:
__SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = '''lower newer'''
__SCREAMING_SNAKE_CASE : Any = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''google/owlvit-base-patch32'''
__SCREAMING_SNAKE_CASE : str = OwlViTProcessor.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = ['''cat''', '''nasa badge''']
__SCREAMING_SNAKE_CASE : Dict = processor(text=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = '''google/owlvit-base-patch32'''
__SCREAMING_SNAKE_CASE : Dict = OwlViTProcessor.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = [['''cat''', '''nasa badge'''], ['''person''']]
__SCREAMING_SNAKE_CASE : Optional[int] = processor(text=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 16
__SCREAMING_SNAKE_CASE : List[str] = len(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = max([len(lowerCAmelCase__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__( self :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Dict = '''google/owlvit-base-patch32'''
__SCREAMING_SNAKE_CASE : Any = OwlViTProcessor.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = ['''cat''', '''nasa badge''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = 16
__SCREAMING_SNAKE_CASE : List[str] = inputs['''input_ids''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def __magic_name__( self :str ) -> str:
__SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : int = processor(images=lowerCAmelCase__ , query_images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__( self :Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : List[str] = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE : str = processor.batch_decode(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 9 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
__lowerCAmelCase : Optional[int] ='\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__lowerCAmelCase : Optional[Any] ='\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__lowerCAmelCase : Dict ='\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return float((preds == labels).mean() )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = simple_accuracy(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = float(fa_score(y_true=lowercase__ , y_pred=lowercase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = np.array(lowercase__ )
__SCREAMING_SNAKE_CASE : str = np.array(lowercase__ )
__SCREAMING_SNAKE_CASE : str = en_sentvecs.shape[0]
# mean centering
__SCREAMING_SNAKE_CASE : Tuple = en_sentvecs - np.mean(lowercase__ , axis=0 )
__SCREAMING_SNAKE_CASE : Optional[int] = in_sentvecs - np.mean(lowercase__ , axis=0 )
__SCREAMING_SNAKE_CASE : str = cdist(lowercase__ , lowercase__ , '''cosine''' )
__SCREAMING_SNAKE_CASE : int = np.array(range(lowercase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = sim.argsort(axis=1 )[:, :10]
__SCREAMING_SNAKE_CASE : str = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Tuple:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def __magic_name__( self :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple ) -> str:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(lowerCAmelCase__ , lowerCAmelCase__ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 9 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
A : Optional[int] = logging.get_logger(__name__)
A : Any = {'''vocab_file''': '''spiece.model'''}
A : Tuple = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class lowerCamelCase (A__ ):
"""simple docstring"""
def __init__( self : int , __magic_name__ : List[Any] , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Any=False , __magic_name__ : List[Any]="<s>" , __magic_name__ : str="</s>" , __magic_name__ : Dict="<unk>" , __magic_name__ : Any="<sep>" , __magic_name__ : Dict="<pad>" , __magic_name__ : Optional[Any]="<cls>" , __magic_name__ : Optional[int]="<mask>" , __magic_name__ : List[str]=["<eop>", "<eod>"] , __magic_name__ : str = None , **__magic_name__ : int , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
SCREAMING_SNAKE_CASE_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
SCREAMING_SNAKE_CASE_ = jieba
SCREAMING_SNAKE_CASE_ = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __A ( self : List[str] ) -> List[Any]:
return len(self.sp_model )
def __A ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ = None
return state
def __setstate__( self : str , __magic_name__ : Union[str, Any] ) -> Any:
SCREAMING_SNAKE_CASE_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self : int , __magic_name__ : Any ) -> List[Any]:
if self.remove_space:
SCREAMING_SNAKE_CASE_ = " ".join(inputs.strip().split() )
else:
SCREAMING_SNAKE_CASE_ = inputs
SCREAMING_SNAKE_CASE_ = outputs.replace("``" , "\"" ).replace("\'\'" , "\"" )
if not self.keep_accents:
SCREAMING_SNAKE_CASE_ = unicodedata.normalize("NFKD" , lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ = "".join([c for c in outputs if not unicodedata.combining(lowerCamelCase__ )] )
if self.do_lower_case:
SCREAMING_SNAKE_CASE_ = outputs.lower()
return outputs
def __A ( self : int , __magic_name__ : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.preprocess_text(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ = self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ = []
for piece in pieces:
if len(lowerCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
SCREAMING_SNAKE_CASE_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
SCREAMING_SNAKE_CASE_ = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase__ )
else:
new_pieces.append(lowerCamelCase__ )
return new_pieces
def __A ( self : Dict , __magic_name__ : int ) -> int:
return self.sp_model.PieceToId(lowerCamelCase__ )
def __A ( self : Tuple , __magic_name__ : str ) -> Union[str, Any]:
return self.sp_model.IdToPiece(lowerCamelCase__ )
def __A ( self : List[str] , __magic_name__ : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = "".join(lowerCamelCase__ ).replace(lowerCamelCase__ , " " ).strip()
return out_string
def __A ( self : List[str] , __magic_name__ : Any , __magic_name__ : List[Any] = None ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __A ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : Any = None , __magic_name__ : List[Any] = False ) -> Union[str, Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is not None:
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1, 1]
return ([0] * len(lowerCamelCase__ )) + [1, 1]
def __A ( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict = None ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __A ( self : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] = None ) -> str:
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , "wb" ) as fi:
SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
def __A ( self : Any , *__magic_name__ : Optional[int] , **__magic_name__ : int ) -> Dict:
SCREAMING_SNAKE_CASE_ = super()._decode(*lowerCamelCase__ , **lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
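# Hedged usage sketch: the class above matches transformers' CpmTokenizer
# (jieba pre-segmentation plus SentencePiece); the checkpoint name comes from
# the pretrained map above, the wrapper function is illustrative.
def _cpm_tokenizer_sketch():
    from transformers import CpmTokenizer  # requires `pip install jieba sentencepiece`
    tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
    ids = tokenizer.encode("你好,世界")
    return tokenizer.decode(ids)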
| 357 |
from collections.abc import Generator
from math import sin
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) != 3_2:
raise ValueError("Input must be of length 32" )
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "08x" )[-8:]
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = b""
for char in message:
bit_string += format(__UpperCamelCase , "08b" ).encode("utf-8" )
SCREAMING_SNAKE_CASE_ = format(len(__UpperCamelCase ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__UpperCamelCase ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) % 5_1_2 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__UpperCamelCase ) , 5_1_2 ):
SCREAMING_SNAKE_CASE_ = bit_string[pos : pos + 5_1_2]
SCREAMING_SNAKE_CASE_ = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "032b" )
SCREAMING_SNAKE_CASE_ = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__UpperCamelCase , 2 )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
return (a + b) % 2**3_2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = preprocess(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
SCREAMING_SNAKE_CASE_ = 0X67452301
SCREAMING_SNAKE_CASE_ = 0Xefcdab89
SCREAMING_SNAKE_CASE_ = 0X98badcfe
SCREAMING_SNAKE_CASE_ = 0X10325476
SCREAMING_SNAKE_CASE_ = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = aa
SCREAMING_SNAKE_CASE_ = ba
SCREAMING_SNAKE_CASE_ = ca
SCREAMING_SNAKE_CASE_ = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = d ^ (b & (c ^ d))
SCREAMING_SNAKE_CASE_ = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = c ^ (d & (b ^ c))
SCREAMING_SNAKE_CASE_ = (5 * i + 1) % 1_6
elif i <= 4_7:
SCREAMING_SNAKE_CASE_ = b ^ c ^ d
SCREAMING_SNAKE_CASE_ = (3 * i + 5) % 1_6
else:
SCREAMING_SNAKE_CASE_ = c ^ (b | not_aa(__UpperCamelCase ))
SCREAMING_SNAKE_CASE_ = (7 * i) % 1_6
SCREAMING_SNAKE_CASE_ = (f + a + added_consts[i] + block_words[g]) % 2**3_2
SCREAMING_SNAKE_CASE_ = d
SCREAMING_SNAKE_CASE_ = c
SCREAMING_SNAKE_CASE_ = b
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , left_rotate_aa(__UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
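# Note: every function above was renamed to ``a__`` by the obfuscation, so the
# helpers the digest routine calls (preprocess, get_block_words, not_aa,
# sum_aa, left_rotate_aa, reformat_hex) are unresolved as written. With those
# names restored, the digest could be cross-checked against the standard
# library, e.g. (assuming the top-level function was named md5_me):
#   import hashlib
#   msg = b"The quick brown fox jumps over the lazy dog"
#   assert md5_me(msg) == hashlib.md5(msg).hexdigest()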
| 305 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> bool:
if not isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase__ : int = f'''Input value of [number={number}] must be an integer'''
raise TypeError(UpperCamelCase )
if number < 0:
return False
lowerCamelCase__ : Tuple = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
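# The check above accepts exactly the automorphic numbers, i.e. numbers whose
# square ends in the number itself: 5 (25), 6 (36), 25 (625), 76 (5776).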
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 |
"""simple docstring"""
def __lowerCamelCase ( a_ : int , a_ : str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE :Optional[int] = [1]
for i in range(2 , a_ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
__SCREAMING_SNAKE_CASE :List[str] = []
__SCREAMING_SNAKE_CASE :Optional[Any] = list(range(a_ ) )
# Find permutation
while factorials:
__SCREAMING_SNAKE_CASE :Optional[int] = factorials.pop()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = divmod(a_ , a_ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
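# Illustrative call (both parameters were collapsed to ``a_`` by the
# obfuscation; the signature was presumably (n, k)): decomposing k=10 in the
# factorial number system for n=4 yields the permutation [1, 3, 0, 2].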
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 191 | 0 |
from __future__ import annotations
from collections import namedtuple
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
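# Illustrative call (the function name is obfuscated above; exactly one of the
# three arguments must be 0): voltage=0, current=2, power=5 returns
# result(name='voltage', value=2.5).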
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 |
def __UpperCamelCase ( _A = 4000000 ):
lowerCAmelCase_ = [0, 1]
lowerCAmelCase_ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCAmelCase_ = 0
for j in range(len(_A ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
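# For the default limit of 4,000,000 the even-valued Fibonacci terms sum to
# 4613732 (Project Euler problem 2).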
if __name__ == "__main__":
print(f"{solution() = }")
| 167 | 0 |
import argparse
import struct
import unittest
class __magic_name__ :
def __init__( self : Dict , lowerCamelCase__ : bytes ) -> None:
'''simple docstring'''
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCase__ : List[str] = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
UpperCamelCase__ : List[Any] = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
UpperCamelCase__ : int = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase__ : bytes ) -> bytes:
'''simple docstring'''
UpperCamelCase__ : int = b'''\x80''' + (b'''\x00''' * (63 - (len(lowerCamelCase__ ) + 8) % 64))
UpperCamelCase__ : Optional[Any] = struct.pack('''>Q''' , (len(lowerCamelCase__ ) * 8) )
return data + padding + big_endian_integer
def UpperCAmelCase__ ( self : List[str] ) -> None:
'''simple docstring'''
UpperCamelCase__ : int = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase__ : Optional[Any] = list(struct.unpack('''>16L''' , lowerCamelCase__ ) )
# add 48 0-ed integers
words += [0] * 48
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCamelCase__ : int = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
UpperCamelCase__ : Union[str, Any] = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
UpperCamelCase__ : Optional[Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
UpperCamelCase__ : int = self.ror(lowerCamelCase__ , 6 ) ^ self.ror(lowerCamelCase__ , 11 ) ^ self.ror(lowerCamelCase__ , 25 )
UpperCamelCase__ : Union[str, Any] = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
UpperCamelCase__ : str = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
UpperCamelCase__ : Optional[int] = self.ror(lowerCamelCase__ , 2 ) ^ self.ror(lowerCamelCase__ , 13 ) ^ self.ror(lowerCamelCase__ , 22 )
UpperCamelCase__ : Optional[int] = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase__ : int = (sa + maj) % 0x1_00_00_00_00
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[Any] = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0x1_00_00_00_00),
)
UpperCamelCase__ : Optional[int] = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase__ : Any = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
UpperCamelCase__ : List[Any] = ''''''.join([hex(lowerCamelCase__ )[2:].zfill(8 ) for value in self.hashes] )
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> int:
'''simple docstring'''
return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : str ) -> None:
'''simple docstring'''
import hashlib
UpperCamelCase__ : int = bytes('''Test String''' , '''utf-8''' )
self.assertEqual(SHAaaa(lowerCamelCase__ ).hash , hashlib.shaaaa(lowerCamelCase__ ).hexdigest() )
def _a ( ):
"""simple docstring"""
import doctest
doctest.testmod()
UpperCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
'''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument(
'''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
UpperCamelCase__ : Dict = f.read()
else:
UpperCamelCase__ : str = bytes(SCREAMING_SNAKE_CASE , '''utf-8''' )
print(SHAaaa(SCREAMING_SNAKE_CASE ).hash )
if __name__ == "__main__":
main()
| 146 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
def __init__( self : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int=13 , lowerCamelCase__ : Union[str, Any]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : str=True , lowerCamelCase__ : str=True , lowerCamelCase__ : Dict=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : Optional[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Tuple=10 , lowerCamelCase__ : List[Any]=0.02 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : str=0.6 , lowerCamelCase__ : int=None , ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Any = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : List[Any] = image_size
UpperCamelCase__ : str = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : int = is_training
UpperCamelCase__ : Dict = use_labels
UpperCamelCase__ : int = hidden_size
UpperCamelCase__ : Union[str, Any] = num_hidden_layers
UpperCamelCase__ : Tuple = num_attention_heads
UpperCamelCase__ : Union[str, Any] = intermediate_size
UpperCamelCase__ : Dict = hidden_act
UpperCamelCase__ : str = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Union[str, Any] = type_sequence_label_size
UpperCamelCase__ : str = initializer_range
UpperCamelCase__ : str = mask_ratio
UpperCamelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : Optional[int] = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = ViTMAEModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Tuple = ViTMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : List[str] = model(lowerCamelCase__ )
UpperCamelCase__ : int = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
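        # with the defaults above: num_patches = (30 // 2) ** 2 = 225 and
        # expected_num_channels = 2 ** 2 * 3 = 12, i.e. one flattened pixel patch per input patch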
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : int = ViTMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ : Any = model(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ : Any = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = config_and_inputs
UpperCamelCase__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase):
A: Optional[Any] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
A: Union[str, Any] = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
A: Any = False
A: str = False
A: Optional[int] = False
A: Any = False
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ViTMAEModelTester(self )
UpperCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def UpperCAmelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(lowerCamelCase__ )
UpperCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Optional[int] = [*signature.parameters.keys()]
UpperCamelCase__ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def UpperCAmelCase__ ( self : Dict ) -> str:
'''simple docstring'''
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] ) -> Tuple:
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase__ : List[str] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase__ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase__ : Optional[Any] = torch.from_numpy(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : Union[str, Any] = pt_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
            out_a = outputs[0].cpu().numpy()
            out_a[np.isnan(out_a)] = 0  # zero out NaNs so the comparison below is well-defined
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ : Any = model_class.from_pretrained(lowerCamelCase__ )
model.to(lowerCamelCase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
                # Make sure we don't have nans
                out_b = after_outputs[0].cpu().numpy()
                out_b[np.isnan(out_b)] = 0
                max_diff = np.amax(np.abs(out_a - out_b))
                self.assertLessEqual(max_diff, 1E-5)
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Dict = ViTMAEModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : str ) -> Any:
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase__ : Dict = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = self.default_image_processor
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : str = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Tuple = ViTMAEConfig()
UpperCamelCase__ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase__ : Dict = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase__ : List[Any] = model(**lowerCamelCase__ , noise=torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ ) )
# verify the logits
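        # for facebook/vit-mae-base: 224 / 16 = 14 patches per side -> 196 patches,
        # and each patch decodes to 16 * 16 * 3 = 768 pixel values, hence (1, 196, 768)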
UpperCamelCase__ : Optional[Any] = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCamelCase__ : Dict = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase__ ) , atol=1E-4 ) )
| 146 | 1 |
"""simple docstring"""
from itertools import product
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[int]:
"""simple docstring"""
lowerCAmelCase__ :Any = sides_number
lowerCAmelCase__ :Dict = max_face_number * dice_number
lowerCAmelCase__ :Optional[Any] = [0] * (max_total + 1)
lowerCAmelCase__ :List[str] = 1
lowerCAmelCase__ :Dict = range(_SCREAMING_SNAKE_CASE , max_face_number + 1 )
for dice_numbers in product(_SCREAMING_SNAKE_CASE , repeat=_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[Any] = sum(_SCREAMING_SNAKE_CASE )
totals_frequencies[total] += 1
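    # e.g. two 4-sided dice: totals 2..8 occur 1, 2, 3, 4, 3, 2, 1 times (16 outcomes in total)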
return totals_frequencies
def __A () ->float:
"""simple docstring"""
lowerCAmelCase__ :str = total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCAmelCase__ :Union[str, Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCAmelCase__ :Tuple = 0
lowerCAmelCase__ :List[str] = 9
lowerCAmelCase__ :Any = 4 * 9
lowerCAmelCase__ :Optional[int] = 6
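    # Peter wins when his total strictly exceeds Colin's, so the slice below
    # sums the counts of every Colin total smaller than peter_total (ties excluded)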
for peter_total in range(_SCREAMING_SNAKE_CASE , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCAmelCase__ :Optional[int] = (4**9) * (6**6)
lowerCAmelCase__ :int = peter_wins_count / total_games_number
lowerCAmelCase__ :Tuple = round(_SCREAMING_SNAKE_CASE , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
| 254 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __A (_SCREAMING_SNAKE_CASE = "" ) ->dict[str, float]:
"""simple docstring"""
lowerCAmelCase__ :Optional[Any] = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
lowerCAmelCase__ :str = BeautifulSoup(requests.get(_SCREAMING_SNAKE_CASE ).text , 'html.parser' )
lowerCAmelCase__ :List[Any] = soup.find_all('td' , attrs='titleColumn' )
lowerCAmelCase__ :Optional[int] = soup.find_all('td' , class_='ratingColumn imdbRating' )
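    # NOTE: this scrape depends on IMDb's current chart markup (the titleColumn /
    # "ratingColumn imdbRating" cells); if the page layout changes, both lists come
    # back empty and the function returns an empty dict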
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
}
def __A (_SCREAMING_SNAKE_CASE = "IMDb_Top_250_Movies.csv" ) ->None:
"""simple docstring"""
lowerCAmelCase__ :Any = get_imdb_top_aaa_movies()
with open(_SCREAMING_SNAKE_CASE , 'w' , newline='' ) as out_file:
lowerCAmelCase__ :Dict = csv.writer(_SCREAMING_SNAKE_CASE )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 254 | 1 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
UpperCAmelCase : Dict = True
from torch.cuda.amp import autocast
UpperCAmelCase : Tuple = logging.getLogger(__name__)
@dataclass
class __lowercase :
"""simple docstring"""
UpperCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCamelCase : Optional[str] = field(
default=a_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
UpperCamelCase : Optional[bool] = field(
default=a_ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
UpperCamelCase : Optional[bool] = field(
default=a_ , metadata={"help": "Whether to log verbose messages or not."} , )
UpperCamelCase : Optional[float] = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
UpperCamelCase : Optional[float] = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
UpperCamelCase : Optional[float] = field(
default=0.9_9_9_9_9_5 , metadata={"help": "Decay of gumbel temperature during training."} )
def __lowerCamelCase ( lowerCamelCase__ : ModelArguments , lowerCamelCase__ : TrainingArguments ):
'''simple docstring'''
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
lowerCamelCase = logging.WARNING
if model_args.verbose_logging:
lowerCamelCase = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
lowerCamelCase = logging.INFO
logger.setLevel(lowerCamelCase__ )
@dataclass
class __lowercase :
"""simple docstring"""
UpperCamelCase : str = field(
default=a_ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
UpperCamelCase : Optional[str] = field(
default=a_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCamelCase : Optional[str] = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
UpperCamelCase : Optional[str] = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
UpperCamelCase : Optional[str] = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
UpperCamelCase : bool = field(
default=a_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCamelCase : Optional[int] = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
UpperCamelCase : Optional[int] = field(
default=a_ , metadata={"help": "The number of processes to use for the preprocessing."} , )
UpperCamelCase : Optional[float] = field(
default=2_0.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class __lowercase :
"""simple docstring"""
UpperCamelCase : WavaVecaForPreTraining
UpperCamelCase : WavaVecaFeatureExtractor
UpperCamelCase : Union[bool, str] = "longest"
UpperCamelCase : Optional[int] = None
UpperCamelCase : Optional[int] = None
def __call__( self , A ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
lowerCamelCase = self.feature_extractor.pad(
A , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
lowerCamelCase = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
lowerCamelCase = batch["""input_values"""].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowerCamelCase = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
torch.long )
lowerCamelCase = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device )
            # these two operations make sure that all values
            # before the output length indices are attended to
lowerCamelCase = 1
lowerCamelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
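            # reversing the mask, taking a running sum and reversing back spreads the
            # single 1 written at each sequence's last valid frame to every earlier
            # position, so the bool mask is True exactly for the un-padded frames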
# sample randomly masked indices
lowerCamelCase = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=A , min_masks=2 , )
return batch
class __lowercase ( a_ ):
"""simple docstring"""
def __init__( self , *A , A=1 , A=0 , A=1.0 , **A ) -> List[Any]:
'''simple docstring'''
super().__init__(*A , **A )
lowerCamelCase = 0
lowerCamelCase = max_gumbel_temp
lowerCamelCase = min_gumbel_temp
lowerCamelCase = gumbel_temp_decay
def __A ( self , A , A ) -> torch.Tensor:
'''simple docstring'''
model.train()
lowerCamelCase = self._prepare_inputs(A )
if self.use_amp:
with autocast():
lowerCamelCase = self.compute_loss(A , A )
else:
lowerCamelCase = self.compute_loss(A , A )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowerCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowerCamelCase = loss.sum() / (inputs["""mask_time_indices"""]).sum()
else:
raise ValueError(F'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
lowerCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(A ).backward()
elif self.use_apex:
with amp.scale_loss(A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(A )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
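        # the schedule is exponential annealing with a floor:
        # temperature(t) = max(max_gumbel_temp * gumbel_temp_decay ** t, min_gumbel_temp)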
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def __lowerCamelCase ( ):
'''simple docstring'''
lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase , lowerCamelCase , lowerCamelCase = parser.parse_args_into_dataclasses()
configure_logger(lowerCamelCase__ , lowerCamelCase__ )
# Downloading and loading a dataset from the hub.
lowerCamelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
lowerCamelCase = DatasetDict()
lowerCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , )
lowerCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
lowerCamelCase = DatasetDict()
lowerCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
lowerCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=lowerCamelCase__ )
def prepare_dataset(lowerCamelCase__ : List[Any] ):
# check that all files have the correct sampling rate
lowerCamelCase , lowerCamelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
lowerCamelCase = datasets.map(
lowerCamelCase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names )
# filter audio files that are too long
lowerCamelCase = vectorized_datasets.filter(
lambda lowerCamelCase__ : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(lowerCamelCase__ : Optional[Any] ):
return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
lowerCamelCase = vectorized_datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowerCamelCase = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"""PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
""" ``config.feat_extract_norm='layer'""" )
lowerCamelCase = WavaVecaForPreTraining(lowerCamelCase__ )
lowerCamelCase = DataCollatorForWavaVecaPretraining(model=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
lowerCamelCase = WavaVecaPreTrainer(
model=lowerCamelCase__ , data_collator=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=lowerCamelCase__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 252 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowercase ( a_ ):
"""simple docstring"""
UpperCamelCase : Tuple = "naver-clova-ix/donut-base-finetuned-docvqa"
UpperCamelCase : Optional[int] = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
UpperCamelCase : Optional[Any] = "document_qa"
UpperCamelCase : Any = AutoProcessor
UpperCamelCase : Optional[int] = VisionEncoderDecoderModel
UpperCamelCase : Any = ["image", "text"]
UpperCamelCase : str = ["text"]
def __init__( self , *A , **A ) -> Optional[Any]:
'''simple docstring'''
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*A , **A )
def __A ( self , A , A ) -> int:
'''simple docstring'''
lowerCamelCase = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowerCamelCase = task_prompt.replace("""{user_input}""" , A )
lowerCamelCase = self.pre_processor.tokenizer(
A , add_special_tokens=A , return_tensors="""pt""" ).input_ids
lowerCamelCase = self.pre_processor(A , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __A ( self , A ) -> Optional[Any]:
'''simple docstring'''
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=A , ).sequences
def __A ( self , A ) -> int:
'''simple docstring'''
lowerCamelCase = self.pre_processor.batch_decode(A )[0]
lowerCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
lowerCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
lowerCamelCase = re.sub(r"""<.*?>""" , """""" , A , count=1 ).strip() # remove first task start token
lowerCamelCase = self.pre_processor.tokenajson(A )
return sequence["answer"]
| 252 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 334 |
'''simple docstring'''
from collections import deque
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None:
'''simple docstring'''
A: Union[str, Any] = process_name # process name
A: List[str] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
A: Dict = arrival_time
A: Optional[Any] = burst_time # remaining burst time
A: Any = 0 # total time of the process wait in ready queue
A: Any = 0 # time from arrival time to completion time
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int , ) -> None:
'''simple docstring'''
A: Dict = number_of_queues
# time slice of queues that round robin algorithm applied
A: int = time_slices
# unfinished process is in this ready_queue
A: Tuple = queue
# current time
A: int = current_time
# finished process is in this sequence queue
A: deque[Process] = deque()
def _snake_case ( self : List[Any] ) -> list[str]:
'''simple docstring'''
A: str = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]:
'''simple docstring'''
A: Optional[int] = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]:
'''simple docstring'''
A: Any = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]:
'''simple docstring'''
A: List[Any] = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> list[int]:
'''simple docstring'''
return [q.burst_time for q in queue]
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Process ) -> int:
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> deque[Process]:
'''simple docstring'''
A: deque[Process] = deque() # sequence deque of finished process
while len(SCREAMING_SNAKE_CASE_ ) != 0:
A: Optional[Any] = ready_queue.popleft() # current process
            # if the process's arrival time is later than the current time, jump ahead to it
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(SCREAMING_SNAKE_CASE_ )
# update current time
self.current_time += cp.burst_time
            # finish the process and set the process's burst time to 0
A: Any = 0
# set the process's turnaround time because it is finished
A: int = self.current_time - cp.arrival_time
# set the completion time
A: List[str] = self.current_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE_ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int ) -> tuple[deque[Process], deque[Process]]:
'''simple docstring'''
A: deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(SCREAMING_SNAKE_CASE_ ) ):
A: Dict = ready_queue.popleft() # current process
            # if the process's arrival time is later than the current time, jump ahead to it
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(SCREAMING_SNAKE_CASE_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
A: Optional[Any] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(SCREAMING_SNAKE_CASE_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
                # set burst time to 0 because the process is finished
A: int = 0
# set the finish time
A: Union[str, Any] = self.current_time
                # update the process's turnaround time because it is finished
A: Tuple = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE_ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _snake_case ( self : Optional[Any] ) -> deque[Process]:
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
A , A: Optional[Any] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
UpperCamelCase = Process('''P1''', 0, 53)
UpperCamelCase = Process('''P2''', 0, 17)
UpperCamelCase = Process('''P3''', 0, 68)
UpperCamelCase = Process('''P4''', 0, 24)
UpperCamelCase = 3
UpperCamelCase = [17, 25]
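# two round-robin queues with time slices 17 and 25; the third (last) queue is FCFS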
UpperCamelCase = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
UpperCamelCase = Process('''P1''', 0, 53)
UpperCamelCase = Process('''P2''', 0, 17)
UpperCamelCase = Process('''P3''', 0, 68)
UpperCamelCase = Process('''P4''', 0, 24)
UpperCamelCase = 3
UpperCamelCase = [17, 25]
UpperCamelCase = deque([Pa, Pa, Pa, Pa])
UpperCamelCase = MLFQ(number_of_queues, time_slices, queue, 0)
UpperCamelCase = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
f'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 334 | 1 |
'''simple docstring'''
def lowercase ( __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
if not isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase : List[str] = F"Expected string as input, found {type(__magic_name__ )}"
raise ValueError(__magic_name__ )
if not isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase : Union[str, Any] = F"Expected boolean as use_pascal parameter, found {type(__magic_name__ )}"
raise ValueError(__magic_name__ )
UpperCAmelCase : List[Any] = input_str.split("_" )
UpperCAmelCase : str = 0 if use_pascal else 1
UpperCAmelCase : Optional[Any] = words[start_index:]
UpperCAmelCase : List[str] = [word[0].upper() + word[1:] for word in words_to_capitalize]
UpperCAmelCase : List[Any] = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 311 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase : Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
a : Union[str, Any] = Accelerator()
a : str = (accelerator.state.process_index + 2, 10)
a : List[str] = torch.randint(0, 10, shape).to(accelerator.device)
a : Optional[int] = ""
a : int = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 311 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 262 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __A ( unittest.TestCase ):
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
lowerCamelCase =Vector()
def _snake_case ( self ):
lowerCamelCase =Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCAmelCase_ ) , """(0,0,0,0,0,1)""" )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCAmelCase_ ) , 4 )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2] )
lowerCamelCase =Vector([1, 2, 3, 4, 5] )
lowerCamelCase =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
lowerCamelCase =Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2, 3] )
lowerCamelCase =Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2, 3] )
lowerCamelCase =Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2, 3] )
lowerCamelCase =Vector([2, -1, 4] ) # for test of dot product
lowerCamelCase =Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def _snake_case ( self ):
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def _snake_case ( self ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2, 3] )
lowerCamelCase =Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCAmelCase_ , UpperCAmelCase_ ) ) , """(3,4,7)""" )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 0, 0, 0, 0, 0] )
lowerCamelCase =x.copy()
self.assertEqual(str(UpperCAmelCase_ ) , str(UpperCAmelCase_ ) )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCAmelCase_ ) , """(0,1,0)""" )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase_ ) )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
lowerCamelCase =Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase_ ) )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def _snake_case ( self ):
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 262 | 1 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Any , _A : str = "▁" , _A : bool = True , _A : Union[str, AddedToken] = "<unk>" , _A : Union[str, AddedToken] = "</s>" , _A : Union[str, AddedToken] = "<pad>" , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
__SCREAMING_SNAKE_CASE : int = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__SCREAMING_SNAKE_CASE : Any = token_dict['''token''']
__SCREAMING_SNAKE_CASE : Optional[int] = Tokenizer(Unigram() )
__SCREAMING_SNAKE_CASE : List[str] = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) , ''' ''' ),
normalizers.Lowercase(),
] )
__SCREAMING_SNAKE_CASE : Any = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_A , add_prefix_space=_A ),
pre_tokenizers.Digits(individual_digits=_A ),
pre_tokenizers.Punctuation(),
] )
__SCREAMING_SNAKE_CASE : Tuple = decoders.Metaspace(replacement=_A , add_prefix_space=_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , )
__SCREAMING_SNAKE_CASE : Dict = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(_A , _A )
def UpperCAmelCase__ ( self : Optional[Any] , _A : Union[str, List[str]] , _A : int = 8000 , _A : bool = True , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = trainers.UnigramTrainer(
vocab_size=_A , special_tokens=self.special_tokens_list , show_progress=_A , )
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Tuple = [files]
self._tokenizer.train(_A , trainer=_A )
self.add_unk_id()
def UpperCAmelCase__ ( self : Tuple , _A : Union[Iterator[str], Iterator[Iterator[str]]] , _A : int = 8000 , _A : bool = True , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = trainers.UnigramTrainer(
vocab_size=_A , special_tokens=self.special_tokens_list , show_progress=_A , )
self._tokenizer.train_from_iterator(_A , trainer=_A )
self.add_unk_id()
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(self._tokenizer.to_str() )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.special_tokens['''unk''']['''id''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = Tokenizer.from_str(json.dumps(_A ) )
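    # Usage sketch (corpus path hypothetical): instantiate the class with its defaults,
    # pass ["corpus.txt"] to the file-based training method above (vocab_size defaults
    # to 8000); the <unk> id is wired up afterwards so unknown tokens map to id 2,
    # matching the special-token table built in __init__.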
| 303 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def a__ ( snake_case , snake_case=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = OmegaConf.load(snake_case )
if display:
print(yaml.dump(OmegaConf.to_container(snake_case ) ) )
return config
def a__ ( snake_case , snake_case=None , snake_case=None ):
"""simple docstring"""
if conf_path is None:
__SCREAMING_SNAKE_CASE : Any = '''./model_checkpoints/vqgan_only.yaml'''
__SCREAMING_SNAKE_CASE : List[str] = load_config(snake_case , display=snake_case )
__SCREAMING_SNAKE_CASE : str = VQModel(**config.model.params )
if ckpt_path is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''./model_checkpoints/vqgan_only.pt'''
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(snake_case , map_location=snake_case )
if ".ckpt" in ckpt_path:
__SCREAMING_SNAKE_CASE : Optional[Any] = sd['''state_dict''']
model.load_state_dict(snake_case , strict=snake_case )
model.to(snake_case )
del sd
return model
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.encode(snake_case )
print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
__SCREAMING_SNAKE_CASE : Any = model.decode(snake_case )
return xrec
def a__ ( snake_case , snake_case=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = string.rsplit('''.''' , 1 )
if reload:
__SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.import_module(snake_case )
importlib.reload(snake_case )
return getattr(importlib.import_module(snake_case , package=snake_case ) , cls )
def a__ ( snake_case ):
"""simple docstring"""
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def a__ ( snake_case , snake_case , snake_case=True , snake_case=True ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = instantiate_from_config(snake_case )
if sd is not None:
model.load_state_dict(snake_case )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def a__ ( snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
# load the specified checkpoint
if ckpt:
__SCREAMING_SNAKE_CASE : Dict = torch.load(snake_case , map_location='''cpu''' )
__SCREAMING_SNAKE_CASE : List[Any] = pl_sd['''global_step''']
print(F'''loaded model from global step {global_step}.''' )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''state_dict''': None}
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Dict = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=snake_case , eval_mode=snake_case )['''model''']
return model, global_step
| 303 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=3_2 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=1_6 , _lowerCamelCase=[1, 2, 1] , _lowerCamelCase=[2, 2, 4] , _lowerCamelCase=2 , _lowerCamelCase=2.0 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-5 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=1_0 , _lowerCamelCase=8 , ):
UpperCamelCase_: str = parent
UpperCamelCase_: Optional[int] = batch_size
UpperCamelCase_: Tuple = image_size
UpperCamelCase_: Tuple = patch_size
UpperCamelCase_: List[Any] = num_channels
UpperCamelCase_: List[Any] = embed_dim
UpperCamelCase_: int = depths
UpperCamelCase_: List[Any] = num_heads
UpperCamelCase_: Any = window_size
UpperCamelCase_: Optional[Any] = mlp_ratio
UpperCamelCase_: Optional[Any] = qkv_bias
UpperCamelCase_: Dict = hidden_dropout_prob
UpperCamelCase_: str = attention_probs_dropout_prob
UpperCamelCase_: Tuple = drop_path_rate
UpperCamelCase_: Dict = hidden_act
UpperCamelCase_: str = use_absolute_embeddings
UpperCamelCase_: Optional[Any] = patch_norm
UpperCamelCase_: Optional[int] = layer_norm_eps
UpperCamelCase_: Any = initializer_range
UpperCamelCase_: Union[str, Any] = is_training
UpperCamelCase_: Union[str, Any] = scope
UpperCamelCase_: List[str] = use_labels
UpperCamelCase_: Optional[int] = type_sequence_label_size
UpperCamelCase_: Optional[int] = encoder_stride
def _a ( self ):
UpperCamelCase_: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_: Optional[int] = None
if self.use_labels:
UpperCamelCase_: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_: Optional[Any] = self.get_config()
return config, pixel_values, labels
def _a ( self ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Any = SwinvaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: List[str] = model(_lowerCamelCase )
UpperCamelCase_: Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase_: List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
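        # with the defaults above (image_size=32, patch_size=2, embed_dim=16, 3 stages):
        # expected_seq_len = 256 // 4 ** 2 = 16 tokens and expected_dim = 16 * 2 ** 2 = 64
        # after two patch-merging stages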
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: List[Any] = SwinvaForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: Tuple = SwinvaForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_: Optional[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Union[str, Any] = self.type_sequence_label_size
UpperCamelCase_: Optional[Any] = SwinvaForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ):
UpperCamelCase_: str = self.prepare_config_and_inputs()
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Dict = config_and_inputs
UpperCamelCase_: Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : List[Any] =(
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
a : str =(
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
a : Union[str, Any] =False
a : Tuple =False
a : str =False
a : List[str] =False
    def setUp ( self ):
        self.model_tester = SwinvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SwinvaConfig , embed_dim=3_7 )
def _a ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def _a ( self ):
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def _a ( self ):
pass
def _a ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def _a ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def _a ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths )
            self.assertEqual(len(attentions ) , expected_num_attentions )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , expected_num_attentions )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            if hasattr(self.model_tester , 'num_hidden_states_types' ):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , expected_num_attentions )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def check_hidden_states_output ( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
def _a ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
def _a ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def _a ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def _a ( self ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def _a ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor ( self ):
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def _a ( self ):
        model = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) ) | 292 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowerCAmelCase( ABC ):
"""simple docstring"""
@staticmethod
@abstractmethod
def _a ( _lowerCamelCase ):
raise NotImplementedError()
@abstractmethod
def _a ( self ):
raise NotImplementedError() | 292 | 1 |
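# The abstract base class above fixes the contract every CLI subcommand follows: a static
# register_subcommand hook that wires its own argparse subparser, plus a run method. A minimal
# sketch of a hypothetical concrete command using that pattern (all names here are invented
# for illustration, not part of any real CLI):
from argparse import ArgumentParser

class EchoCommand:
    def __init__(self, text):
        self._text = text

    @staticmethod
    def register_subcommand(subparsers):
        # attach this command's own subparser and bind a factory for it
        echo_parser = subparsers.add_parser("echo", help="Print the given text.")
        echo_parser.add_argument("text", type=str)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def run(self):
        print(self._text)

if __name__ == "__main__":
    root = ArgumentParser("demo-cli")
    EchoCommand.register_subcommand(root.add_subparsers())
    args = root.parse_args(["echo", "hello"])
    args.func(args).run()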
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def lowercase ( split_dict: SplitDict ) -> List[Any]:
    __a = split_dict._to_yaml_list()
    assert len(split_dict ) == len(__a )
    reloaded = SplitDict._from_yaml_list(__a )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
    '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='''my_dataset''' )] )
def lowercase ( split_info: int ) -> List[str]:
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 45 |
"""simple docstring"""
import numpy as np
def lowercase ( vector: np.ndarray , alpha: float ) -> np.ndarray:
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
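# A quick numerical sanity check of the ELU above: positive inputs pass through unchanged,
# negative inputs saturate toward -alpha (values below are arbitrary examples):
import numpy as np

v = np.array([-2.0, 0.5, 3.0])
out = np.where(v > 0, v, 1.0 * (np.exp(v) - 1))
assert out[1] == 0.5 and out[2] == 3.0
assert -1.0 < out[0] < 0.0  # exp(-2) - 1 ≈ -0.8647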
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
    def get_tokenizer ( self : Optional[Any] , **lowercase_ : str):
'''simple docstring'''
        lowercase_['''lower_case'''] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : int):
'''simple docstring'''
        input_text = '''<unk> UNwanted , running'''
        output_text = '''<unk> unwanted, running'''
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True)
        tokens = tokenizer.tokenize('''<unk> UNwanted , running''')
        self.assertListEqual(tokens , ['''<unk>''', '''unwanted''', ''',''', '''running'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [0, 4, 8, 7])
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
        tokenizer = TransfoXLTokenizer(lower_case=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''') , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
        tokenizer = TransfoXLTokenizer(lower_case=False)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in) , tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out) , text_in)
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
tokenizer.add_tokens(['''new1''', '''new2'''])
tokenizer.move_added_token('''new1''' , 1)
# Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer) , original_len + 2)
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''') , [1])
self.assertEqual(tokenizer.decode([1]) , '''new1''')
| 318 |
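# The tokenizer tests above hinge on one behaviour: words missing from the vocab file fall
# back to "<unk>". A library-free sketch of that lookup (this mimics the idea only, not
# TransfoXLTokenizer's actual implementation; the tiny vocab is made up):
vocab = {"<unk>": 0, "want": 3, "unwanted": 4, "running": 7, ",": 8}

def tokenize_with_unk(text):
    return [tok if tok in vocab else "<unk>" for tok in text.lower().split()]

assert tokenize_with_unk("SOMETHINGUNSEEN unwanted , running") == ["<unk>", "unwanted", ",", "running"]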
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance (emb_a , emb_b , eps=1e-12 ) -> List[Any]:
    """simple docstring"""
    norm_emb_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1 ) , a_min=eps ) ).T
    norm_emb_b = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_a , norm_emb_b.T )
class FlaxStableDiffusionSafetyCheckerModule ( nn.Module ):
'''simple docstring'''
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype)
        self.concept_embeds = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim))
        self.concept_embeds_weights = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,))
        self.special_care_embeds_weights = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,))
    def __call__( self : Optional[Any] , clip_input : Optional[Any]):
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds)
        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3)
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1)
        return has_nsfw_concepts
class lowerCAmelCase__ ( FlaxPreTrainedModel ):
'''simple docstring'''
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__( self : Union[str, Any] , config : CLIPConfig , input_shape : Optional[Tuple] = None , seed : int = 0 , dtype : jnp.dtype = jnp.float32 , _do_init : bool = True , **kwargs : Any , ):
        '''simple docstring'''
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs)
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init)
    def _SCREAMING_SNAKE_CASE ( self : Tuple , rng : jax.random.KeyArray , input_shape : Tuple , params : FrozenDict = None):
        '''simple docstring'''
        clip_input = jax.random.normal(rng , input_shape)
        params_rng , dropout_rng = jax.random.split(rng)
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        random_params = self.module.init(rngs , clip_input)['''params''']
return random_params
    def __call__( self : List[Any] , clip_input : List[str] , params : dict = None , ):
        '''simple docstring'''
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1))
        return self.module.apply(
            {'''params''': params or self.params} , jnp.array(clip_input , dtype=jnp.float32) , rngs={} , )
| 318 | 1 |
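# The safety checker above boils down to thresholded cosine similarity between image
# embeddings and fixed concept embeddings. A NumPy sketch of the same arithmetic; every
# value below is made up for illustration:
import numpy as np

def cosine_similarity_matrix(emb_1, emb_2, eps=1e-12):
    n1 = emb_1 / np.clip(np.linalg.norm(emb_1, axis=1, keepdims=True), eps, None)
    n2 = emb_2 / np.clip(np.linalg.norm(emb_2, axis=1, keepdims=True), eps, None)
    return n1 @ n2.T

rng = np.random.default_rng(0)
image_embeds = rng.normal(size=(2, 8))      # two images
concept_embeds = rng.normal(size=(3, 8))    # three concepts
concept_weights = np.full(3, 0.5)           # per-concept thresholds

scores = cosine_similarity_matrix(image_embeds, concept_embeds) - concept_weights[None, :]
has_flagged_concept = np.any(scores > 0, axis=1)  # one boolean per image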
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ViTImageProcessor if is_vision_available() else None
@property
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )
def lowercase_ ( self , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowercase_ ( self , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowercase_ ( self ) -> int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
        image_input = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1 ) )
return image_input
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase__ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__lowerCamelCase = self.get_image_processor(do_normalize=lowerCamelCase__ , padding_value=1.0 )
__lowerCamelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='np' )
__lowerCamelCase = processor(images=lowerCamelCase__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowerCamelCase = "test"
__lowerCamelCase = processor(text=lowerCamelCase__ )
__lowerCamelCase = tokenizer(lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowerCamelCase = "test"
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__lowerCamelCase = processor.char_decode(lowerCamelCase__ )
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ )
__lowerCamelCase = [seq.replace(' ' , '' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowerCamelCase = None
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowerCamelCase = torch.randn(1 , 27 , 38 )
__lowerCamelCase = torch.randn(1 , 27 , 50_257 )
__lowerCamelCase = torch.randn(1 , 27 , 30_522 )
__lowerCamelCase = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
| 90 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester (unittest.TestCase ):
'''simple docstring'''
    def __init__( self : Optional[Any] , parent : Dict , batch_size : Dict=7 , num_channels : Optional[int]=3 , image_size : Optional[int]=18 , min_resolution : Dict=30 , max_resolution : List[Any]=400 , do_resize : Union[str, Any]=True , size : Tuple=None , do_center_crop : List[Any]=True , crop_size : int=None , do_flip_channel_order : Optional[int]=True , ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict ( self : Dict ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowerCamelCase_ (ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp ( self : List[Any] ):
        self.image_processor_tester = MobileViTImageProcessingTester(self )
@property
    def image_processor_dict ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self : Tuple ):
_UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , "do_resize" ) )
self.assertTrue(hasattr(A , "size" ) )
self.assertTrue(hasattr(A , "do_center_crop" ) )
self.assertTrue(hasattr(A , "center_crop" ) )
self.assertTrue(hasattr(A , "do_flip_channel_order" ) )
def _A ( self : Any ):
_UpperCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
_UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _A ( self : Any ):
pass
def _A ( self : Dict ):
# Initialize image_processing
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : Optional[Any] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : Union[str, Any] ):
# Initialize image_processing
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
_UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : Optional[int] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : Any ):
# Initialize image_processing
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : Any = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 31 | 0 |
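# The shape assertions in the tests above follow from the usual two-step preprocessing:
# resize so the shortest edge hits a target, then take a fixed center crop. A pure-Python
# sketch of that size arithmetic (the 30x400 input mirrors typical test resolutions; the
# exact rounding rule of the real image processor may differ):
def resize_shortest_edge(height, width, shortest_edge):
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)

h, w = resize_shortest_edge(30, 400, shortest_edge=20)
assert min(h, w) == 20
crop = (18, 18)  # the output tensor is always crop-sized, whatever the input resolution
assert all(c <= s for c, s in zip(crop, (h, w)))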
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ ( ProcessorMixin ):
    attributes =["image_processor", "tokenizer"]
    image_processor_class ="BlipImageProcessor"
    tokenizer_class =("BertTokenizer", "BertTokenizerFast")
def __init__( self : Tuple , a : str , a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = False
super().__init__(a , a )
__lowerCamelCase = self.image_processor
    def __call__( self : int , images : ImageInput = None , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_token_type_ids : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : List[str] , ):
        """simple docstring"""
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode ( self : List[str] , *args : Tuple , **kwargs : str ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode ( self : Optional[Any] , *args : int , **kwargs : Optional[Any] ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names ( self : List[str] ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 363 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 237 | 0 |
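# The init above defers the heavy framework imports until an attribute is actually touched.
# The same effect can be sketched with module-level __getattr__ (PEP 562); this simplified
# stand-in is not transformers' actual _LazyModule implementation, and the mapping below is
# an invented example:
import importlib

_LAZY_ATTRS = {"heavy_dependency": "json"}  # attribute name -> module to import on demand

def __getattr__(name):
    if name in _LAZY_ATTRS:
        return importlib.import_module(_LAZY_ATTRS[name])
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")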
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env ( key, default=False ):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''' )
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
__lowerCAmelCase : Optional[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
__lowerCAmelCase : Optional[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
__lowerCAmelCase : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
__lowerCAmelCase : Tuple = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
__lowerCAmelCase : Union[str, Any] = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
__lowerCAmelCase : Dict = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
__lowerCAmelCase : Optional[Any] = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def a__ ( test_case ):
    '''simple docstring'''
    try:
        import faiss # noqa
    except ImportError:
        test_case = unittest.skip("""test requires faiss""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''simple docstring'''
    try:
        import regex # noqa
    except ImportError:
        test_case = unittest.skip("""test requires regex""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''simple docstring'''
    try:
        import elasticsearch # noqa
    except ImportError:
        test_case = unittest.skip("""test requires elasticsearch""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''simple docstring'''
    try:
        import sqlalchemy # noqa
    except ImportError:
        test_case = unittest.skip("""test requires sqlalchemy""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''simple docstring'''
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("""test requires PyTorch""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''simple docstring'''
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("""test requires TensorFlow""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''simple docstring'''
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("""test requires JAX""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''simple docstring'''
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("""test requires Pillow""" )(test_case )
    return test_case
def a__ ( A_ ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(A_ )
else:
return test_case
def a__ ( A_ ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(A_ )
else:
return test_case
def a__ ( A_ ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(A_ )
else:
return test_case
def a__ ( A_ ):
'''simple docstring'''
def _require_spacy_model(A_ ):
try:
import spacy # noqa F401
spacy.load(A_ )
except ImportError:
return unittest.skip("""test requires spacy""" )(A_ )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(A_ ) )(A_ )
else:
return test_case
return _require_spacy_model
def a__ ( A_ ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(A_ )
else:
return test_case
def a__ ( A_ ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(A_ )
else:
return test_case
def a__ ( test_case ):
    '''simple docstring'''
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("""test is slow""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''simple docstring'''
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("""test is local""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''simple docstring'''
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("""test is packaged""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''simple docstring'''
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("""test requires remote""" )(test_case )
    return test_case
def a__ ( *decorators ):
    '''simple docstring'''
    def decorate(cls ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith("""test""" ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls, name, fn )
        return cls
    return decorate
class RequestWouldHangIndefinitelyError ( Exception ):
'''simple docstring'''
pass
class OfflineSimulationMode ( Enum ):
'''simple docstring'''
a__ = 0
a__ = 1
a__ = 2
@contextmanager
def a__ ( mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16 ):
    '''simple docstring'''
    online_request = requests.Session().request
    def timeout_request(session, method, url, **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        url = """https://10.255.255.1"""
        if kwargs.get("""timeout""" ) is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
        kwargs["""timeout"""] = timeout
        try:
            return online_request(method, url, **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("""10.255.255.1""", f'''OfflineMock[{url}]''' ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session, prepared_request, **kwargs ):
        raise requests.ConnectionError("""Offline mode is enabled.""", request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("""requests.Session.send""", raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("""requests.Session.request""", timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("""datasets.config.HF_DATASETS_OFFLINE""", True ):
            yield
    else:
        raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def a__ ( *args, **kwargs ):
    '''simple docstring'''
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args, **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def a__ ( ):
'''simple docstring'''
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def a__ ( ):
'''simple docstring'''
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def a__ ( rng_a, rng_b ):
    '''simple docstring'''
    return deepcopy(rng_a ).integers(0, 100, 10 ).tolist() == deepcopy(rng_b ).integers(0, 100, 10 ).tolist()
def a__ ( func ):
    '''simple docstring'''
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func, *args, **kwargs ):
        try:
            return func(*args, **kwargs )
        except HTTPError as err:
            if str(err ).startswith("""500""" ) or str(err ).startswith("""502""" ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper, func )
class _RunOutput :
    '''simple docstring'''
    def __init__( self : Optional[Any] , returncode : int , stdout : Dict , stderr : Optional[Any] ) -> int:
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream ( stream, callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess ( cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False ):
    '''simple docstring'''
    if echo:
        print("""\nRunning: """, """ """.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label="" ):
        line = line.decode("""utf-8""" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label, line, file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l : tee(l, out, sys.stdout, label="""stdout:""" ) ),
            _read_stream(p.stderr, lambda l : tee(l, err, sys.stderr, label="""stderr:""" ) ),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err )
def a__ ( cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True ):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo ) )
    cmd_str = """ """.join(cmd )
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
    return result
def pytest_xdist_worker_id ( ):
    '''simple docstring'''
    worker = os.environ.get("""PYTEST_XDIST_WORKER""", """gw0""" )
    worker = re.sub(R"""^gw""", """""", worker, 0, re.M )
    return int(worker )
def a__ ( ):
'''simple docstring'''
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
| 88 |
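# Every require_* helper above shares one shape: probe for a dependency, and wrap the test
# in unittest.skip when it is missing. A generic, self-contained version of that pattern
# (a sketch, not the datasets library's own API):
import unittest
from importlib.util import find_spec

def require_package(package_name):
    def decorator(test_case):
        if find_spec(package_name) is None:
            return unittest.skip(f"test requires {package_name}")(test_case)
        return test_case
    return decorator

@require_package("some_optional_dependency")  # hypothetical package name
def test_something():
    pass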
_a = 65_521
def lowerCAmelCase__(plain_text ) -> int:
    '''simple docstring'''
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 209 | 0 |
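# The checksum above is Adler-32; the result can be cross-checked against the C
# implementation in the standard library:
import zlib

def adler32_py(data: str) -> int:
    a, b = 1, 0
    for ch in data:
        a = (a + ord(ch)) % 65_521
        b = (b + a) % 65_521
    return (b << 16) | a

# classic reference value: Adler-32 of "Wikipedia" is 0x11E60398
assert adler32_py("Wikipedia") == zlib.adler32(b"Wikipedia") == 0x11E60398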
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = T5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
__snake_case :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case :str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 370 |
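# The script above is normally driven through argparse, but the conversion function can also
# be called directly from Python; the paths below are placeholders, not real files:
# convert_tf_checkpoint_to_pytorch(
#     tf_checkpoint_path="/path/to/t5/model.ckpt",
#     config_file="/path/to/t5/config.json",
#     pytorch_dump_path="/path/to/output",
# )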
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case :Optional[Any] = logging.get_logger(__name__)
__snake_case :List[Any] = '''▁'''
__snake_case :List[Any] = {'''vocab_file''': '''spiece.model'''}
__snake_case :Tuple = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
__snake_case :List[Any] = {
'''google/reformer-crime-and-punishment''': 52_4288,
}
class _A ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self : Any , vocab_file : str , eos_token : Optional[Any]="</s>" , unk_token : List[Any]="<unk>" , additional_special_tokens : Any=[] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : List[Any] , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size ( self : Optional[Any]):
'''simple docstring'''
return self.sp_model.get_piece_size()
    def get_vocab ( self : List[str]):
'''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Dict):
'''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
return state
    def __setstate__( self : Dict , d : List[Any]):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
    def _tokenize ( self : Union[str, Any] , text : str):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str)
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
return self.sp_model.piece_to_id(__SCREAMING_SNAKE_CASE)
    def _convert_id_to_token ( self : List[str] , index : List[Any]):
        '''simple docstring'''
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string ( self : List[Any] , tokens : Any):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary ( self : str , save_directory : str , filename_prefix : Optional[str] = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,)
| 131 | 0 |
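# The tokenizer above drops its sentencepiece handle in __getstate__ and rebuilds it in
# __setstate__, because the underlying C++ object cannot be pickled. A minimal standalone
# sketch of that pattern, with an ordinary file handle standing in for the unpicklable member:
class HoldsUnpicklable:
    def __init__(self, path):
        self.path = path
        self.handle = open(path, "rb")  # file objects cannot be pickled

    def __getstate__(self):
        state = self.__dict__.copy()
        state["handle"] = None  # drop the unpicklable member before pickling
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.handle = open(self.path, "rb")  # rebuild it from the stored path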
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
def get_config ( model_name ) -> Optional[Any]:
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = 'std_conv' if 'bit' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1_0_0_0 , id2label=id2label , label2id=label2id , )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "head.fc" in name:
        name = name.replace('head.fc', 'classifier.1')
    if name.startswith('norm'):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if 'head' in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={'shortest_edge': timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='pt').pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print('Logits:', logits[0, :3])
    print('Predicted class:', model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
lowercase__ : List[str] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
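# Example invocation for the script above (a sketch; the file name
# convert_bit_to_pytorch.py is assumed, the flags come from the argparse
# definitions just above):
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 \
#       --push_to_hub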
| 338 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
'''simple docstring'''
from manim import *
class Stage2(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 21 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division truncating toward zero, matching C-style behaviour.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
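# Behaviour sketch for evaluate_postfix (illustrative, not part of the original
# doctests): ["2", "1", "+", "3", "*"] is (2 + 1) * 3 in infix, so it yields 9,
# and the sign-aware division above makes ["15", "-7", "/"] yield -2 (truncated
# toward zero) instead of Python's default floor of -3.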
| 21 | 1 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def __lowerCamelCase ( _lowercase , _lowercase="train" ) -> Dict:
return calculate_hypothesis_value(_lowercase , _lowercase ) - output(
_lowercase , _lowercase )
def __lowerCamelCase ( _lowercase ) -> str:
UpperCAmelCase : List[str] = 0
for i in range(len(_lowercase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[Any]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def __lowerCamelCase ( _lowercase , _lowercase=m ) -> Tuple:
UpperCAmelCase : int = 0
for i in range(_lowercase ):
if index == -1:
summation_value += _error(_lowercase )
else:
summation_value += _error(_lowercase ) * train_data[i][0][index]
return summation_value
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Tuple = summation_of_cost_derivative(_lowercase , _lowercase ) / m
return cost_derivative_value
def __lowerCamelCase ( ) -> Any:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase : List[str] = 0.00_0002
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : int = 0
while True:
j += 1
UpperCAmelCase : Optional[int] = [0, 0, 0, 0]
for i in range(0 , len(_lowercase ) ):
UpperCAmelCase : Tuple = get_cost_derivative(i - 1 )
UpperCAmelCase : int = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
_lowercase , _lowercase , atol=_lowercase , rtol=_lowercase , ):
break
UpperCAmelCase : List[Any] = temp_parameter_vector
print(("""Number of iterations:""", j) )
def __lowerCamelCase ( ) -> Optional[int]:
for i in range(len(_lowercase ) ):
print(("""Actual output value:""", output(_lowercase , """test""" )) )
print(("""Hypothesis output:""", calculate_hypothesis_value(_lowercase , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
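# Worked example of the hypothesis above (illustrative): with the initial
# parameter_vector [2, 4, 1, 5] and the first training input (5, 2, 3),
#   h(x) = 2 + 4*5 + 1*2 + 5*3 = 39
# so the initial training error on that example is 39 - 15 = 24, which is what
# gradient descent drives toward zero.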
| 265 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 265 | 1 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')
def distribute_coins(root: TreeNode | None) -> int:
    '''simple docstring'''
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('The nodes number should be same as the number of coins')

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
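# Minimal usage sketch (illustrative): a three-node tree with all 3 coins at
# the root needs exactly two moves, one coin pushed down to each child.
#
#   root = TreeNode(3, TreeNode(0), TreeNode(0))
#   assert distribute_coins(root) == 2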
| 370 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
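# Quick sanity check of the defaults above (illustrative): the attribute_map
# lets the generic config names resolve to XGLM's own fields.
#
#   config = XGLMConfig()
#   assert config.hidden_size == 1024        # aliased to d_model
#   assert config.num_attention_heads == 16  # aliased to attention_heads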
| 175 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
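# Typical usage (a sketch; the checkpoint name comes from the pretrained maps
# above and is downloaded on first use):
#
#   tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   enc = tokenizer("Hello world", return_tensors="pt")
#
# Single sequences come back wrapped as <s> ... </s>, matching
# build_inputs_with_special_tokens above.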
| 13 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
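# The same script covers every launch mode listed in the header comment; a
# typical multi-GPU run would look like (sketch, assuming the file is saved
# as nlp_example.py):
#
#   accelerate config                                         # one-time questionnaire
#   accelerate launch nlp_example.py --mixed_precision fp16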
| 13 | 1 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
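# Behaviour sketch (illustrative):
#   capitalize("hello world") -> "Hello world"
#   capitalize("123 abc")     -> "123 abc"   (non-letters are left untouched)
#   capitalize("")            -> ""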
| 364 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCAmelCase : Union[str, Any] = parser.parse_args()
lowerCAmelCase : Tuple = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
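# Example invocation (a sketch; the script file name is assumed, both
# arguments are the positional ones defined above):
#
#   python convert_xglm_checkpoint.py /path/to/model.pt ./xglm-hf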
| 168 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 123 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """simple docstring"""

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    """simple docstring"""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    """simple docstring"""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    """simple docstring"""

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    """simple docstring"""

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    """simple docstring"""

    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 123 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_a : Dict= load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
_a : Union[str, Any]= raw_datasets["validation"].column_names
_a : Any= "question" if "question" in column_names else column_names[0]
_a : List[Any]= "context" if "context" in column_names else column_names[1]
_a : Tuple= "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
_a : List[Any]= tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
_a : List[Any]= raw_datasets["validation"]
# Validation Feature Creation
_a : Dict= eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
_a : Optional[int]= default_data_collator
_a : Tuple= eval_dataset.remove_columns(["example_id", "offset_mapping"])
_a : List[Any]= DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers (bindings 3 and 4 hold the start and end logits)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
| 355 | """simple docstring"""
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Search `list_data` for `key` by checking both ends and recursing inwards.

    Returns the index of `key`, or -1 if it is not present.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
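    # A minimal usage sketch (hypothetical values): the search walks in from
    # both ends, so it needs at most about len(list_data) / 2 recursive calls.
    example = [1, 5, 7, 20, 40, 49]
    assert search(example, 5) == 1   # found from the left end
    assert search(example, 49) == 5  # found from the right end
    assert search(example, 6) == -1  # absent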
| 95 | 0 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # The row we are trying to fill is the number of queens already placed
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that the column is not
        # already used in the current board (possible_board), because that would be a
        # vertical collision. Then we apply the two formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or 135: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
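    # Diagonal bookkeeping in a nutshell: squares (row, col) on the same 45º
    # diagonal share row - col, and squares on the same 135º diagonal share
    # row + col. For example (2, 0) and (4, 2) both give row - col == 2, so a
    # queen on one attacks the other; that is exactly what the collision lists
    # above detect in constant time.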
| 154 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    # Replace the last `occurrence` occurrences of `old` in `s` with `new`
    splits = s.rsplit(old, occurrence)
    return new.join(splits)
def count_parameters(state_dict: dict) -> torch.Tensor:
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict: dict) -> dict:
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
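    # Quick sketch of the `rreplace` helper on hypothetical checkpoint keys:
    # only the final suffix occurrence is rewritten, which is what the
    # ".w" -> ".weight" and ".b" -> ".bias" renames above rely on.
    assert rreplace("blocks.w.w", ".w", ".weight", 1) == "blocks.w.weight"
    assert rreplace("blocks.b", ".b", ".bias", 1) == "blocks.bias"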
| 154 | 1 |
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 361 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 138 | 0 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
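# For example (hypothetical inputs), hf_hub_url("org-name/dataset-name", "filename with blanks.csv")
# yields "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv":
# `quote` percent-encodes the blanks and the revision defaults to "main".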
| 121 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
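    # Worked example of the n - 1 = d * 2**exp decomposition used above:
    # for n = 13, n - 1 = 12 = 3 * 2**2, so d = 3 and exp = 2. Each random
    # witness a is then checked via a**3 mod 13 followed by up to two squarings.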
| 121 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
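# A quick sanity sketch of the SQuAD-style scoring helpers above on toy strings:
# normalize_answer strips articles, punctuation, case and extra whitespace,
# so "The Eiffel Tower!" and "eiffel   tower" compare equal.
if __name__ == "__main__":
    assert normalize_answer("The Eiffel Tower!") == "eiffel tower"
    assert exact_match_score("The Eiffel Tower!", "eiffel   tower")
    # 2 shared tokens out of 3 predicted and 2 reference: precision 2/3, recall 1, F1 0.8
    assert abs(f1_score("eiffel tower paris", "eiffel tower") - 0.8) < 1e-9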
| 367 |
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """Return True if every element of `collection` is distinct."""
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
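    # Usage sketch: duplicates collapse in the set, so the lengths diverge.
    assert all_unique([1, 2, 3]) is True
    assert all_unique([1, 2, 2]) is False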
| 224 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet2DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
UpperCAmelCase_ : Dict = VideoToVideoSDPipeline
UpperCAmelCase_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {"image", "width", "height"}
UpperCAmelCase_ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {"image"}
UpperCAmelCase_ : int = PipelineTesterMixin.required_optional_params - {"latents"}
UpperCAmelCase_ : Union[str, Any] = False
# No `output_type`.
UpperCAmelCase_ : Optional[Any] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def a_ ( self):
"""simple docstring"""
torch.manual_seed(0)
        unet = UNet2DConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
torch.manual_seed(0)
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=0):
"""simple docstring"""
lowerCAmelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__A)).to(__A)
if str(__A).startswith("""mps"""):
lowerCAmelCase = torch.manual_seed(__A)
else:
lowerCAmelCase = torch.Generator(device=__A).manual_seed(__A)
lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = VideoToVideoSDPipeline(**__A)
lowerCAmelCase = sd_pipe.to(__A)
sd_pipe.set_progress_bar_config(disable=__A)
lowerCAmelCase = self.get_dummy_inputs(__A)
lowerCAmelCase = """np"""
lowerCAmelCase = sd_pipe(**__A).frames
lowerCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
lowerCAmelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def a_ ( self):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__A , expected_max_diff=5E-3)
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""")
def a_ ( self):
"""simple docstring"""
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""")
def a_ ( self):
"""simple docstring"""
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""")
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
# 10 frames
lowerCAmelCase = torch.Generator(device="""cpu""").manual_seed(0)
lowerCAmelCase = torch.randn((1, 10, 3, 1024, 576) , generator=__A)
lowerCAmelCase = video.to("""cuda""")
lowerCAmelCase = """Spiderman is surfing"""
lowerCAmelCase = pipe(__A , video=__A , generator=__A , num_inference_steps=3 , output_type="""pt""").frames
lowerCAmelCase = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1E-2
| 272 |
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by `level` (between -255 and 255)."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
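    # A minimal end-to-end sketch with a synthetic image (no file needed):
    #     img = Image.new("L", (2, 2), color=100)
    #     out = change_brightness(img, 50)
    #     assert list(out.getdata()) == [150, 150, 150, 150]
    # since brightness(100) = 128 + 50 + (100 - 128) = 150 for every pixel.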
| 84 | 0 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_4bit or is_8bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
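# All of the replacement helpers above share the same recursive traversal:
# walk named_children, swap leaf Linear layers in place, and recurse into
# containers. A minimal bitsandbytes-free sketch of that pattern (the
# `factory` argument is hypothetical, standing in for the quantized-layer
# constructors used above):
#
#     import torch.nn as nn
#
#     def replace_linears(model: nn.Module, factory) -> nn.Module:
#         for name, module in model.named_children():
#             if isinstance(module, nn.Linear):
#                 model._modules[name] = factory(module.in_features, module.out_features)
#             elif len(list(module.children())) > 0:
#                 replace_linears(module, factory)  # recurse into submodules
#         return model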
| 3 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case_ = False
@property
def lowerCAmelCase ( self : Dict )-> str:
return 32
@property
def lowerCAmelCase ( self : int )-> List[str]:
return 32
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return self.time_input_dim
@property
def lowerCAmelCase ( self : Optional[Any] )-> Any:
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : str )-> Union[str, Any]:
return 1_00
@property
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs)
return model
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : str )-> List[str]:
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : int )-> Dict:
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
snake_case = DDIMScheduler(**__snake_case )
snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : Tuple=0 )-> List[Any]:
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
# create hint
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith("""mps""" ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : Dict )-> Optional[int]:
snake_case = """cpu"""
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = pipe(**self.get_dummy_inputs(__snake_case ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] )-> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] )-> Optional[int]:
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case = init_image.resize((5_12, 5_12) )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case = torch.from_numpy(np.array(__snake_case ) ).float() / 2_55.0
snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case = """A robot, 4k photo"""
        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case , snake_case = pipe_prior(
__snake_case , image=__snake_case , strength=0.85 , generator=__snake_case , negative_prompt="""""" , ).to_tuple()
snake_case = pipeline(
image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , hint=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 3 | 1 |
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # kg/mol of nitrogen (N2)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
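    # Sanity check of v_rms = sqrt(3 * R * T / M): with T = 300 K and
    # M = 0.028 kg/mol, 3 * R * T / M is about 267_250, whose square root is
    # about 517 m/s, in line with tabulated values for nitrogen at room
    # temperature.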
| 287 |
def lucas_lehmer_test(p: int) -> bool:
    """For an odd prime exponent p, return True iff the Mersenne number 2**p - 1 is prime."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
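    # The two calls above decide the primality of Mersenne numbers:
    # 2**7 - 1 = 127 is prime, so the first prints True;
    # 2**11 - 1 = 2047 = 23 * 89, so the second prints False.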
| 287 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Element-wise SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
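    # Small demo of both activations on a sample vector.
    vector = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(vector))              # ~ [0.269, 0.5, 0.731]
    print(sigmoid_linear_unit(vector))  # ~ [-0.269, 0.0, 0.731]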
| 362 |
def max_number_after_digit_removal(number: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit."""
    if not isinstance(number, int):
        raise TypeError("only integers accepted as input")
    num = str(abs(number))
    # one copy of the digit list per digit position
    num_transpositions = [list(num) for _ in range(len(num))]
    for index in range(len(num)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 225 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 9 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 166 | 0 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCamelCase ( unittest.TestCase ):
    def test_set_level(self):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")
        # restore to the original level
        logging.set_verbosity(level_origin)
    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level, current_level, f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}', )
        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)
        # no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
    """simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
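# Hedged usage sketch outside the test suite. Every name below is a real
# `transformers.utils.logging` API exercised by the tests above:
#
#     from transformers.utils import logging as hf_logging
#     hf_logging.set_verbosity_error()    # silence warnings from all transformers.* loggers
#     hf_logging.get_logger("transformers").warning("hidden")  # suppressed
#     hf_logging.set_verbosity_warning()  # restore the default level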
| 351 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 137 | 0 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ['note_seq']
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ['note_seq'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ['note_seq'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ['note_seq'])
| 18 |
'''simple docstring'''
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
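    # Additional illustrative checks against the same demo graph (hand-verified):
    # bfs_shortest_path(demo_graph, "A", "F") -> ["A", "C", "F"]
    # bfs_shortest_path_distance(demo_graph, "A", "F") -> 2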
| 89 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    '''simple docstring'''
    def setUp(self) -> None:
        """simple docstring"""
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        """simple docstring"""
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}', instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )
    def save_results_as_csv(self, job_name):
        """simple docstring"""
        TrainingJobAnalytics(job_name).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv')
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        """simple docstring"""
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json', "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 356 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 276 | 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""")
    def test_inputs_embeds(self):
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""")
    def test_determinism(self):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""")
    def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""")
    def test_save_load_fast_init_to_base(self):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""")
    def test_model_outputs_equivalence(self):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4)) | 163 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)
    def get_last_lr(self):
        return self.scheduler.get_last_lr()
    def state_dict(self):
        return self.scheduler.state_dict()
    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)
    def get_lr(self):
        return self.scheduler.get_lr()
    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs) | 163 | 1 |
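# Hedged usage sketch for the wrapper above. The torch APIs are real; wiring it
# up outside an `accelerate` training loop is illustrative only:
#
#     import torch
#     model = torch.nn.Linear(4, 4)
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#     lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#     scheduler = AcceleratedScheduler(lr_scheduler, optimizer)
#     scheduler.step()  # only advances once the wrapped optimizer really stepped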
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    """simple docstring"""
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 221 |
import random
def _partition(data: list, pivot) -> tuple:
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    """simple docstring"""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
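# Quick sanity check (illustrative values, hand-verified against a sorted copy):
if __name__ == "__main__":
    data = [2, 4, 5, 7, 899, 54, 32]  # sorted: [2, 4, 5, 7, 32, 54, 899]
    print(quick_select(data, 2))  # 3rd smallest -> 5
    print(quick_select(data, len(data) // 2))  # median -> 7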
| 221 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 38 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, oder?''',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        '''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
        '''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
        '''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
        '''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
    }
    pair = F"""{src_lang}-{tgt_lang}"""
    readme = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, '''README.md''')
    print(F"""Generating {path}""")
    with open(path, '''w''', encoding='''utf-8''') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 174 | 0 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols('''ct x y z''')
def beta(velocity: float) -> float:
    """simple docstring"""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''')
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''')
    return velocity / c
def gamma(velocity: float) -> float:
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])
def transform(velocity: float, event=None) -> np.ndarray:
    """simple docstring"""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print('''Example of four vector: ''')
print(F'''ct\' = {four_vector[0]}''')
print(F'''x\' = {four_vector[1]}''')
print(F'''y\' = {four_vector[2]}''')
print(F'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'''\n{numerical_vector}''')
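    # Hedged extra example (numbers illustrative): boosting a concrete numeric
    # event at half light speed; `transform` scales event[0] (time) by c itself.
    boosted = transform(c / 2, np.array([1.0, 1.0, 0.0, 0.0]))
    print(F'''\n{boosted}''')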
| 173 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)
    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''', os.path.join(tmp_dir, '''vocab.txt'''))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type='''bert''', use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''', os.path.join(tmp_dir, '''vocab.json'''))
            shutil.copy('''./tests/fixtures/merges.txt''', os.path.join(tmp_dir, '''merges.txt'''))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type='''gpt2''', use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)
    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''', os.path.join(tmp_dir, '''vocab.txt'''))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type='''bert''')
            self.assertIsInstance(tokenizer, BertTokenizerFast)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''', os.path.join(tmp_dir, '''vocab.json'''))
            shutil.copy('''./tests/fixtures/merges.txt''', os.path.join(tmp_dir, '''merges.txt'''))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type='''gpt2''')
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)
    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained('''./''', tokenizer_type='''xxx''')
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''')
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)
            self.assertEqual(tokenizer.model_max_length, 512)
    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError, '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''', ):
                tokenizer = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''')
    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased'''), BertTokenizerFast)
    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained('''distilbert-base-uncased''', do_lower_case=False)
        sample = '''Hello, world. How are you?'''
        tokens = tokenizer.tokenize(sample)
        self.assertEqual('''[UNK]''', tokens[0])
        tokenizer = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''', do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual('''[UNK]''', tokens[0])
    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)
    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new
            # tokenizer and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
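    # (My reading of the assertions above: the single HEAD request is the
    # cache-validation call; the tokenizer files themselves are served from the
    # local cache, hence zero GET requests.)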
| 173 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
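
# Example run (script name and search terms are placeholders, my own illustration):
#     python crawl_google_results.py neural networks
# opens the top Google result for "neural networks" in the default browser.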
| 127 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
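
# Quick sanity check (my own illustration, not part of the original module):
# format_time(3661) -> "1:01:01", format_time(75) -> "01:15".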
def html_progress_bar(value, total, prefix, label, width=300):
    "HTML code for a progress bar `value`/`total` with `prefix` on the left and `label` on the right."
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
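
# Shape of the expected input (my own illustration): the first row holds the
# headers, later rows hold the values, and floats are rendered to six decimals:
#     text_to_html_table([["Step", "Training Loss"], [10, 1.2345]])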
class NotebookProgressBar:
    """
    A progress bar for display in a notebook.
    """

    warmup = 5
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value, force_update=False, comment=None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
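
# A minimal usage sketch, assuming a Jupyter/IPython frontend (illustration only):
#
#     pbar = NotebookProgressBar(100, prefix="Processing")
#     for step in range(100):
#         pbar.update(step + 1, comment=f"step {step}")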
class NotebookTrainingTracker(NotebookProgressBar):
    """
    A tracker for the training progress: a progress bar, an inner table of metrics and an optional child bar.
    """

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """
    A `TrainerCallback` that displays the progress of training or evaluation in a notebook.
    """

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
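
# Note: when `transformers` detects a notebook environment, `Trainer` swaps its
# default console progress callback for `NotebookProgressCallback`, so this
# class is rarely instantiated by hand.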
| 127 | 1 |
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """
    Reads given file as bytes and returns them as a long string
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """
    Adds new strings (curr_string + "0", curr_string + "1") to the lexicon
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]
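
# Why the log2 check above matters (my reading of the algorithm): once the
# number of assigned indices crosses a power of two, one extra bit is needed to
# address them, so every stored code is left-padded with "0" to keep a uniform
# width.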
def compress_data(data_bits: str) -> str:
    """
    Compresses given data_bits using Lempel-Ziv-Welch compression algorithm
    and returns the result as a string
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """
    Adds given file's length in front (using Elias gamma coding) of the compressed string
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes given to_write string (should only consist of 0's and 1's) as bytes in the file
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads source file, compresses it and writes the compressed result in destination file
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
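
# Example invocation (file names are placeholders, my own illustration):
#     python lempel_ziv.py source.bin destination.lz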
| 166 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'transfo-xl'
SCREAMING_SNAKE_CASE = ['mems']
SCREAMING_SNAKE_CASE = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self , _lowerCamelCase=267735 , _lowerCamelCase=[20000, 40000, 200000] , _lowerCamelCase=1024 , _lowerCamelCase=1024 , _lowerCamelCase=16 , _lowerCamelCase=64 , _lowerCamelCase=4096 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=18 , _lowerCamelCase=1600 , _lowerCamelCase=1000 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase=-1 , _lowerCamelCase=True , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="normal" , _lowerCamelCase=0.01 , _lowerCamelCase=0.01 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase=0 , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Any = vocab_size
UpperCAmelCase__ : Dict = []
self.cutoffs.extend(_lowerCamelCase )
if proj_share_all_but_first:
UpperCAmelCase__ : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
UpperCAmelCase__ : List[Any] = [False] + [False] * len(self.cutoffs )
UpperCAmelCase__ : Dict = d_model
UpperCAmelCase__ : Dict = d_embed
UpperCAmelCase__ : List[Any] = d_head
UpperCAmelCase__ : List[str] = d_inner
UpperCAmelCase__ : Any = div_val
UpperCAmelCase__ : str = pre_lnorm
UpperCAmelCase__ : int = n_layer
UpperCAmelCase__ : Optional[Any] = n_head
UpperCAmelCase__ : Tuple = mem_len
UpperCAmelCase__ : Dict = same_length
UpperCAmelCase__ : Union[str, Any] = attn_type
UpperCAmelCase__ : Optional[int] = clamp_len
UpperCAmelCase__ : str = sample_softmax
UpperCAmelCase__ : Any = adaptive
UpperCAmelCase__ : List[Any] = dropout
UpperCAmelCase__ : List[Any] = dropatt
UpperCAmelCase__ : Tuple = untie_r
UpperCAmelCase__ : str = init
UpperCAmelCase__ : Optional[int] = init_range
UpperCAmelCase__ : Tuple = proj_init_std
UpperCAmelCase__ : str = init_std
UpperCAmelCase__ : List[str] = layer_norm_epsilon
super().__init__(eos_token_id=_lowerCamelCase , **_lowerCamelCase )
@property
def _a (self ):
"""simple docstring"""
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _a (self , _lowerCamelCase ):
"""simple docstring"""
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 166 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ ):
if not numbers:
return 0
if not isinstance(UpperCAmelCase_ , (list, tuple) ) or not all(
isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
UpperCAmelCase = numbers[0]
for i in range(1 , len(UpperCAmelCase_ ) ):
# update the maximum and minimum subarray products
UpperCAmelCase = numbers[i]
if number < 0:
UpperCAmelCase = min_till_now, max_till_now
UpperCAmelCase = max(UpperCAmelCase_ , max_till_now * number )
UpperCAmelCase = min(UpperCAmelCase_ , min_till_now * number )
# update the maximum product found till now
UpperCAmelCase = max(UpperCAmelCase_ , UpperCAmelCase_ )
return max_prod
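
# Worked example (my own): for [2, 3, -2, 4] the running (max, min) pairs are
# (2, 2) -> (6, 3) -> (-2, -12) -> (4, -48), so the maximum product is 6.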
| 78 |
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """
    Find the thirteen adjacent digits in the 1000-digit number `n` that have
    the greatest product, and return that product.
    """
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
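
# Sanity check (my own note, not from the original file): the widely cited
# answer for this 13-digit window over the 1000-digit number is 23514624000.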
if __name__ == "__main__":
print(F"""{solution() = }""")
| 94 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
__A = {"vocab_file": "spiece.model"}
__A = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # Split trailing commas off of number-like pieces, re-encoding the rest.
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
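
    # Note the XLNet-style layout above: the special tokens are appended at the
    # *end* of the sequence (A + <sep> [+ B + <sep>] + <cls>), unlike BERT's
    # leading [CLS].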
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
| 2 | """ Conversion script for the LDM checkpoints. """
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
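
# Example invocation (paths and script name are placeholders, my own illustration):
#     python convert_original_stable_diffusion_to_diffusers.py \
#         --checkpoint_path ./v1-5.ckpt --dump_path ./sd15-diffusers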
| 2 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
_a = load("accuracy")
def lowerCAmelCase__(__snake_case ) -> str:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = eval_pred
lowerCamelCase__ = np.argmax(__snake_case ,axis=1 )
return metric.compute(predictions=__snake_case ,references=__snake_case )
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
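
# The callback above re-runs evaluation on the *training* set whenever an
# evaluation is due, logging its metrics under the "train" prefix -- a cheap
# way to monitor overfitting without a second training loop.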
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 209 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 209 | 1 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
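
    # The method above is the standard KV-cache consistency check: decoding the
    # last tokens with `past_key_values` must match a full forward pass without
    # the cache, up to a small numerical tolerance.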
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
def lowercase ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = FalconModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def lowercase ( self: str ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase ( self: int ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
        config , *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            # toggle alibi on the config so both positional-embedding paths are exercised
            config.alibi = alibi
            self.model_tester.create_and_check_model(config , *inputs )
def lowercase ( self: int ) -> Dict:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 3
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = input_ids.ne(1 ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase_ = FalconForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 3
UpperCamelCase_ = "single_label_classification"
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = input_ids.ne(1 ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase_ = FalconForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase ( self: Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = FalconForCausalLM(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = input_ids.shape[0]
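        # note (layout assumed from the Falcon modeling code): the legacy "RW" cache fuses the
        # batch and head dimensions into 3-D tensors, while the standard cache keeps them separate
        # as 4-D (batch, heads, seq, head_dim) — the ndim assertions below check exactly this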
UpperCamelCase_ = model._convert_to_rw_cache(result.past_key_values )
UpperCamelCase_ = model._convert_cache_to_standard_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for layer in range(len(_SCREAMING_SNAKE_CASE ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowercase ( self: Any ) -> int:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 3
UpperCamelCase_ = "multi_label_classification"
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = input_ids.ne(1 ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase_ = FalconForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase ( self: Dict ) -> Union[str, Any]:
"""simple docstring"""
for model_class in self.all_generative_model_classes:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_SCREAMING_SNAKE_CASE , "use_cache" ):
return
UpperCamelCase_ = model_class(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
if "use_cache" not in inputs:
UpperCamelCase_ = True
UpperCamelCase_ = model(**_SCREAMING_SNAKE_CASE )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
UpperCamelCase_ = (
getattr(_SCREAMING_SNAKE_CASE , "decoder_layers" , _SCREAMING_SNAKE_CASE )
or getattr(_SCREAMING_SNAKE_CASE , "num_decoder_layers" , _SCREAMING_SNAKE_CASE )
or config.num_hidden_layers
)
UpperCamelCase_ = getattr(_SCREAMING_SNAKE_CASE , "num_kv_heads" , config.num_attention_heads )
UpperCamelCase_ = getattr(_SCREAMING_SNAKE_CASE , "d_model" , config.hidden_size )
UpperCamelCase_ = embed_dim // num_attention_heads
UpperCamelCase_ = outputs["past_key_values"]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ , UpperCamelCase_ = inputs["input_ids"].shape
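            # Falcon variants lay out the KV cache differently: the new decoder architecture keeps
            # a full set of attention heads for K/V, while multi-query attention shares a single
            # KV head (a summary inferred from the config flags checked below)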
for i in range(_SCREAMING_SNAKE_CASE ):
if config.new_decoder_architecture:
UpperCamelCase_ = config.num_attention_heads
elif config.multi_query:
UpperCamelCase_ = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
UpperCamelCase_ = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
model.eval()
model.to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
UpperCamelCase_ = model.generate(**_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , max_new_tokens=19 )
UpperCamelCase_ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )[0]
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def lowercase ( self: Tuple ) -> str:
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
UpperCamelCase_ = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = FalconForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
model.eval()
model.to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , max_new_tokens=4 )
model.generate(**_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , max_new_tokens=4 )
model.generate(**_SCREAMING_SNAKE_CASE , num_beams=2 , max_new_tokens=4 )
@slow
def lowercase ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
UpperCamelCase_ = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = FalconForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
model.eval()
model.to(device=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# Test results are the same with and without cache
UpperCamelCase_ = model.generate(**_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , max_new_tokens=20 , use_cache=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = model.generate(**_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , max_new_tokens=20 , use_cache=_SCREAMING_SNAKE_CASE )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 328 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_UpperCamelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_UpperCamelCase : List[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowercase ( self: List[str] ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[int]=0 ) -> List[Any]:
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase ( self: Any ) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase ( self: int ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowercase ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase ( self: Dict ) -> Any:
"""simple docstring"""
self._test_save_load_local()
def lowercase ( self: Any ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 328 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __lowerCAmelCase ( unittest.TestCase , ToolTesterMixin ):
    def setUp( self ):
        '''simple docstring'''
        self.tool = load_tool('text-classification' )
        self.tool.setup()
        self.remote_tool = load_tool('text-classification' , remote=True )

    def test_exact_match_arg( self ):
        '''simple docstring'''
        result = self.tool('That\'s quite cool' , ['positive', 'negative'] )
        self.assertEqual(result , 'positive' )

    def test_exact_match_arg_remote( self ):
        '''simple docstring'''
        result = self.remote_tool('That\'s quite cool' , ['positive', 'negative'] )
        self.assertEqual(result , 'positive' )

    def test_exact_match_kwarg( self ):
        '''simple docstring'''
        result = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
        self.assertEqual(result , 'positive' )

    def test_exact_match_kwarg_remote( self ):
        '''simple docstring'''
        result = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
        self.assertEqual(result , 'positive' )
| 316 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = 100
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
__UpperCamelCase = out_indices
__UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase = (image_size // patch_size) ** 2
__UpperCamelCase = num_patches + 1
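        # e.g. with the defaults above (image_size=30, patch_size=2): (30 // 2) ** 2 = 225
        # patches, giving a sequence length of 226 once the [CLS] token is counted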
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> int:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase )
# prepare bool_masked_pos
__UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__UpperCamelCase = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCAmelCase , )
else:
__UpperCamelCase = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits.detach().cpu()
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] )
__UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
__UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
| 316 | 1 |
"""simple docstring"""
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def lowerCamelCase_ ( _lowerCamelCase ):
    assert type(_lowerCamelCase ) in (int, float) and _lowerCamelCase == int(_lowerCamelCase )
    decimal = int(_lowerCamelCase )
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    # repeatedly divide by 16; remainders come out least-significant first and are prepended
    while decimal > 0:
        decimal , remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
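# usage sketch (values worked out by hand, not taken from the source):
#   lowerCamelCase_(255) -> '0xff'
#   lowerCamelCase_(-26) -> '-0x1a'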
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : int = concatenate_datasets
A_ : Any = DownloadConfig
A_ : List[Any] = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : List[str] = DownloadConfig
A_ : Optional[int] = DownloadMode
A_ : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 316 | 1 |
'''simple docstring'''
def UpperCamelCase_( input_num : int ):
    '''simple docstring'''
    if not isinstance(input_num , int ):
        raise ValueError("Input must be an integer" )
    if input_num <= 0:
        raise ValueError("Input must be positive" )
    # sum the proper divisors found by trial division up to input_num // 2
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
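# usage sketch (values checked by hand): UpperCamelCase_(6) -> 6 and UpperCamelCase_(28) -> 28,
# i.e. perfect numbers whose proper divisors sum back to the number itself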
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( ProcessorMixin ):
    """simple docstring"""

    # ProcessorMixin wires these attributes to the tokenizer and image-processor classes
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> BatchEncoding:
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # attach the pixel values to the text encoding when both modalities are given
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ) -> Union[str, Any]:
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ) -> Any:
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ) -> Optional[Any]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ) -> Dict:
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
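# usage sketch (checkpoint name and `image` variable are assumptions for illustration):
#   processor = UpperCAmelCase_.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")
# returns input_ids/attention_mask from the tokenizer plus pixel_values from the image processor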
| 235 | 0 |
"""simple docstring"""
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 357 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )

    def testGradientAccumulator( self ):
        # the accumulator sums gradients across calls and counts each call as one step;
        # reset() zeroes both the step counter and the stored gradients
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0] )] )
        accumulator([tf.constant([-2.0, 1.0] )] )
        accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
            accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
        self.assertEqual(accumulator.step , 3 )
        self.assertEqual(len(accumulator.gradients ) , 1 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )

    def testGradientAccumulatorDistributionStrategy( self ):
        # reset the eager context so a fresh MirroredStrategy can be created (internal TF API)
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('''CPU''' )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type='''CPU''' )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer , _ = create_optimizer(5E-5 , 10 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )

        def accumulate_on_replica(gradient ):
            accumulator([gradient] )

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )

        @tf.function
        def accumulate(grad_a , grad_b ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad_a )
                local_variables[1].assign(grad_b )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )

        def _check_local_values(grad_a , grad_b ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grad_a , tol=1E-2 )
            self.assertListAlmostEqual(values[1].value() , grad_b , tol=1E-2 )

        accumulate([1.0, 2.0] , [-1.0, 1.0] )
        accumulate([3.0, -1.0] , [-1.0, -1.0] )
        accumulate([-2.0, 2.0] , [3.0, -2.0] )
        self.assertEqual(accumulator.step , 3 )
        _check_local_values([2.0, 3.0] , [1.0, -2.0] )
        apply_grad()
        self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        _check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 11 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case : List[Any] = 16
snake_case : Tuple = 32
def __lowerCamelCase ( UpperCAmelCase_ : Accelerator , UpperCAmelCase_ : int = 16 ):
"""simple docstring"""
a :Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
a :List[Any] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCAmelCase_ : int ):
# max_length=None => use the model max length (it's actually the default)
a :Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
a :Optional[Any] = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a :List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCAmelCase_ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
a :int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
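        # (tensor cores on recent NVIDIA GPUs are fastest when sequence lengths are multiples
        # of 8 or 16, which is why mixed-precision runs pad to those multiples)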
if accelerator.mixed_precision == "fp8":
a :Union[str, Any] = 16
elif accelerator.mixed_precision != "no":
a :Tuple = 8
else:
a :Any = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
a :List[str] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase , drop_last=__lowerCamelCase )
a :Optional[int] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase , drop_last=(accelerator.mixed_precision == '''fp8''') , )
return train_dataloader, eval_dataloader
def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ):
"""simple docstring"""
a :Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a :Dict = config['''lr''']
a :List[Any] = int(config['''num_epochs'''] )
a :Union[str, Any] = int(config['''seed'''] )
a :Dict = int(config['''batch_size'''] )
a :Optional[int] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
a :Tuple = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
a :Optional[int] = batch_size // MAX_GPU_BATCH_SIZE
a :List[str] = MAX_GPU_BATCH_SIZE
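    # worked example (not from the source): with batch_size=64 and MAX_GPU_BATCH_SIZE=16 this
    # runs micro-batches of 16 and steps the optimizer once every 4 forward/backward passes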
set_seed(__lowerCamelCase )
a , a :str = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a :int = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a :Any = model.to(accelerator.device )
# Instantiate optimizer
a :Union[str, Any] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
# Instantiate scheduler
a :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a :str = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
a :Optional[Any] = model(**__lowerCamelCase )
a :Any = outputs.loss
a :Any = loss / gradient_accumulation_steps
accelerator.backward(__lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a :Tuple = model(**__lowerCamelCase )
a :str = outputs.logits.argmax(dim=-1 )
a , a :Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
a :Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __lowerCamelCase )
def __lowerCamelCase ( ):
"""simple docstring"""
a :int = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
a :int = parser.parse_args()
a :str = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
| 94 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''bridgetower_vision_model'''
def __init__( self , A=768 , A=12 , A=3 , A=16 , A=288 , A=1 , A=1e-05 , A=False , A=True , A=False , **A , ) -> Dict:
super().__init__(**A )
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = stop_gradient
_SCREAMING_SNAKE_CASE = share_layernorm
_SCREAMING_SNAKE_CASE = remove_last_layer
@classmethod
def snake_case_( cls , A , **A ) -> "PretrainedConfig":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cls.get_config_dict(A , **A )
if config_dict.get("""model_type""" ) == "bridgetower":
_SCREAMING_SNAKE_CASE = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''bridgetower_text_model'''
def __init__( self , A=5_0265 , A=768 , A=12 , A=12 , A=1 , A=3072 , A="gelu" , A=0.1 , A=0.1 , A=514 , A=1 , A=1e-05 , A=1 , A=0 , A=2 , A="absolute" , A=True , **A , ) -> Union[str, Any]:
super().__init__(**A )
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = position_embedding_type
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = bos_token_id
_SCREAMING_SNAKE_CASE = eos_token_id
@classmethod
def snake_case_( cls , A , **A ) -> "PretrainedConfig":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cls.get_config_dict(A , **A )
if config_dict.get("""model_type""" ) == "bridgetower":
_SCREAMING_SNAKE_CASE = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''bridgetower'''
def __init__( self , A=True , A="gelu" , A=768 , A=1 , A=1e-05 , A=False , A="add" , A=12 , A=6 , A=False , A=False , A=None , A=None , **A , ) -> Tuple:
# TODO: remove this once the Hub files are updated.
_SCREAMING_SNAKE_CASE = kwargs.pop("""text_config_dict""" , A )
_SCREAMING_SNAKE_CASE = kwargs.pop("""vision_config_dict""" , A )
super().__init__(**A )
_SCREAMING_SNAKE_CASE = share_cross_modal_transformer_layers
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = share_link_tower_layers
_SCREAMING_SNAKE_CASE = link_tower_type
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = tie_word_embeddings
_SCREAMING_SNAKE_CASE = init_layernorm_from_vision_encoder
if text_config is None:
_SCREAMING_SNAKE_CASE = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_SCREAMING_SNAKE_CASE = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_SCREAMING_SNAKE_CASE = BridgeTowerTextConfig(**A )
_SCREAMING_SNAKE_CASE = BridgeTowerVisionConfig(**A )
@classmethod
def snake_case_( cls , A , A , **A ) -> int:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE = self.text_config.to_dict()
_SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
_SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 58 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_A : Dict ={
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =[
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_A : List[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 355 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('''sample_data.csv''', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, point this at its target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    # the last `periods * look_back` rows are held out for testing; the extra `look_back`
    # overlap gives the first test window enough history to form a full input
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
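    # build sliding windows: each input is `look_back` (10) consecutive values and each target
    # is the next `forward_days` (5) values, so the model maps a 10-step history to a 5-step forecast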
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='''mean_squared_error''', optimizer='''adam''')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 129 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : List[Any] = logging.get_logger(__name__)
a_ : int = {
"""weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""",
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : int ='roc_bert'
def __init__( self, lowerCAmelCase=30_522, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-12, lowerCAmelCase=True, lowerCAmelCase=0, lowerCAmelCase="absolute", lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=768, lowerCAmelCase=910, lowerCAmelCase=512, lowerCAmelCase=24_858, lowerCAmelCase=True, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =vocab_size
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =initializer_range
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =use_cache
lowerCamelCase_ =enable_pronunciation
lowerCamelCase_ =enable_shape
lowerCamelCase_ =pronunciation_embed_dim
lowerCamelCase_ =pronunciation_vocab_size
lowerCamelCase_ =shape_embed_dim
lowerCamelCase_ =shape_vocab_size
lowerCamelCase_ =concat_input
lowerCamelCase_ =position_embedding_type
lowerCamelCase_ =classifier_dropout
super().__init__(pad_token_id=lowerCAmelCase, **lowerCAmelCase )
| 75 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __UpperCamelCase ( unittest.TestCase ):
    def setUp( self ):
        """simple docstring"""
        vocab = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        feature_extractor_map = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 16_000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.feature_extraction_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + '''\n''' )
        # load decoder from hub
        self.decoder_name = '''hf-internal-testing/ngram-beam-search-decoder'''

    def get_tokenizer( self, **kwargs_init ):
        """simple docstring"""
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs )

    def get_feature_extractor( self, **kwargs ):
        """simple docstring"""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs )

    def get_decoder( self, **kwargs ):
        """simple docstring"""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs )

    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor, lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
self.assertIsInstance(processor.decoder, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha, 5.0 )
self.assertEqual(processor.language_model.beta, 3.0 )
self.assertEqual(processor.language_model.score_boundary, -7.0 )
self.assertEqual(processor.language_model.unk_score_offset, 3 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(lowerCAmelCase, '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =floats_list((3, 1_000) )
lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' )
lowerCamelCase_ =processor(lowerCAmelCase, return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ ='''This is a test string'''
lowerCamelCase_ =processor(text=lowerCAmelCase )
lowerCamelCase_ =tokenizer(lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self, lowerCAmelCase=(2, 10, 16), lowerCAmelCase=77 ):
"""simple docstring"""
np.random.seed(lowerCAmelCase )
return np.random.rand(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits(shape=(10, 16), seed=13 )
lowerCamelCase_ =processor.decode(lowerCAmelCase )
lowerCamelCase_ =decoder.decode_beams(lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0], decoded_processor.text )
self.assertEqual('''</s> <s> </s>''', decoded_processor.text )
self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase )
else:
with get_context(lowerCAmelCase ).Pool() as pool:
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =list(lowerCAmelCase )
with get_context('''fork''' ).Pool() as p:
lowerCamelCase_ =decoder.decode_beams_batch(lowerCAmelCase, lowerCAmelCase )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase, decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text )
self.assertListEqual(lowerCAmelCase, decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase, decoded_processor.lm_score )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =15
lowerCamelCase_ =-2_0.0
lowerCamelCase_ =-4.0
lowerCamelCase_ =processor.batch_decode(
lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
lowerCamelCase_ =decoded_processor_out.text
lowerCamelCase_ =list(lowerCAmelCase )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase_ =decoder.decode_beams_batch(
lowerCAmelCase, lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out]
lowerCamelCase_ =[d[0][2] for d in decoded_decoder_out]
lowerCamelCase_ =[d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], lowerCAmelCase )
self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7], lowerCAmelCase, atol=1e-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4], lowerCAmelCase, atol=1e-3 ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =2.0
lowerCamelCase_ =5.0
lowerCamelCase_ =-2_0.0
lowerCamelCase_ =True
lowerCamelCase_ =processor.batch_decode(
lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
lowerCamelCase_ =decoded_processor_out.text
lowerCamelCase_ =list(lowerCAmelCase )
decoder.reset_params(
alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase_ =decoder.decode_beams_batch(
lowerCAmelCase, lowerCAmelCase, )
lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], lowerCAmelCase )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0 )
self.assertEqual(lm_model.beta, 5.0 )
self.assertEqual(lm_model.unk_score_offset, -2_0.0 )
self.assertEqual(lm_model.score_boundary, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase_ =os.listdir(lowerCAmelCase )
lowerCamelCase_ =['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =snapshot_download('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase_ =os.listdir(lowerCAmelCase )
lowerCamelCase_ =os.listdir(lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder from the hub and the local files in the cache are the same
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =floats_list((3, 1_000) )
lowerCamelCase_ =processor_wavaveca(lowerCAmelCase, return_tensors='''np''' )
lowerCamelCase_ =processor_auto(lowerCAmelCase, return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =processor_wavaveca.batch_decode(lowerCAmelCase )
lowerCamelCase_ =processor_auto.batch_decode(lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', )
@staticmethod
def lowercase__ ( lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[d[key] for d in offsets]
return retrieved_list
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =self._get_dummy_logits()[0]
lowerCamelCase_ =processor.decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase__ ( self ):
"""simple docstring"""
import torch
lowerCamelCase_ =load_dataset('''common_voice''', '''en''', split='''train''', streaming=lowerCAmelCase )
lowerCamelCase_ =ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000 ) )
lowerCamelCase_ =iter(lowerCAmelCase )
lowerCamelCase_ =next(lowerCAmelCase )
lowerCamelCase_ =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
lowerCamelCase_ =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCamelCase_ =processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).logits.cpu().numpy()
lowerCamelCase_ =processor.decode(logits[0], output_word_offsets=lowerCAmelCase )
lowerCamelCase_ =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCamelCase_ =[
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
lowerCamelCase_ ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), lowerCAmelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), output.text )
# output times
lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''start_time''' ) )
lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''end_time''' ) )
# fmt: off
lowerCamelCase_ =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
lowerCamelCase_ =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
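# A condensed usage sketch (not part of the test file) of the processor exercised
# above. The checkpoint name mirrors the slow test; as the comments above note, the
# multiprocessing pool must be created *after* the processor so the LM is visible
# to the worker processes.
#
# processor = WavaVecaProcessorWithLM.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
# logits = ...  # (batch, time, vocab) numpy logits from a CTC model
# with get_context('fork').Pool() as pool:
#     transcriptions = processor.batch_decode(logits, pool).text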
| 75 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : Tuple = '''codegen'''
SCREAMING_SNAKE_CASE_ : Tuple = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,SCREAMING_SNAKE_CASE__=5_04_00 ,SCREAMING_SNAKE_CASE__=20_48 ,SCREAMING_SNAKE_CASE__=20_48 ,SCREAMING_SNAKE_CASE__=40_96 ,SCREAMING_SNAKE_CASE__=28 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=64 ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__="gelu_new" ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=1E-5 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=5_02_56 ,SCREAMING_SNAKE_CASE__=5_02_56 ,SCREAMING_SNAKE_CASE__=False ,**SCREAMING_SNAKE_CASE__ ,) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = vocab_size
__SCREAMING_SNAKE_CASE :List[str] = n_ctx
__SCREAMING_SNAKE_CASE :int = n_positions
__SCREAMING_SNAKE_CASE :Union[str, Any] = n_embd
__SCREAMING_SNAKE_CASE :str = n_layer
__SCREAMING_SNAKE_CASE :List[Any] = n_head
__SCREAMING_SNAKE_CASE :List[Any] = n_inner
__SCREAMING_SNAKE_CASE :Optional[int] = rotary_dim
__SCREAMING_SNAKE_CASE :int = activation_function
__SCREAMING_SNAKE_CASE :Union[str, Any] = resid_pdrop
__SCREAMING_SNAKE_CASE :Optional[int] = embd_pdrop
__SCREAMING_SNAKE_CASE :List[str] = attn_pdrop
__SCREAMING_SNAKE_CASE :List[Any] = layer_norm_epsilon
__SCREAMING_SNAKE_CASE :str = initializer_range
__SCREAMING_SNAKE_CASE :Tuple = use_cache
__SCREAMING_SNAKE_CASE :Union[str, Any] = bos_token_id
__SCREAMING_SNAKE_CASE :int = eos_token_id
super().__init__(
bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,tie_word_embeddings=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
class _SCREAMING_SNAKE_CASE( A ):
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = "default" ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = False ,) -> Dict:
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE__ ,task=SCREAMING_SNAKE_CASE__ ,patching_specs=SCREAMING_SNAKE_CASE__ ,use_past=SCREAMING_SNAKE_CASE__ )
if not getattr(self._config ,'''pad_token_id''' ,SCREAMING_SNAKE_CASE__ ):
# TODO: how to do that better?
__SCREAMING_SNAKE_CASE :Dict = 0
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ ,direction='''inputs''' )
__SCREAMING_SNAKE_CASE :Dict = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE :Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self._config.n_head
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = super(SCREAMING_SNAKE_CASE__ ,self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
# We need to order the input in the way they appears in the forward()
__SCREAMING_SNAKE_CASE :Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE :int = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE :Any = seqlen + 2
__SCREAMING_SNAKE_CASE :List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__SCREAMING_SNAKE_CASE :List[str] = [
(torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(self.num_layers )
]
__SCREAMING_SNAKE_CASE :Any = common_inputs['''attention_mask''']
if self.use_past:
__SCREAMING_SNAKE_CASE :List[Any] = ordered_inputs['''attention_mask'''].dtype
__SCREAMING_SNAKE_CASE :List[str] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ )] ,dim=1 )
return ordered_inputs
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return 13
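# A minimal sketch (not in the original file) of driving the ONNX config above:
# wrap a model config, then request dummy inputs for export. The second class
# defined above corresponds to transformers' CodeGenOnnxConfig, and the dummy-input
# method is `generate_dummy_inputs` upstream (its name was obfuscated here); all
# variable names below are illustrative assumptions.
#
# from transformers import AutoConfig, AutoTokenizer
# model_config = AutoConfig.from_pretrained('Salesforce/codegen-350M-mono')
# onnx_config = _SCREAMING_SNAKE_CASE(model_config, task='default', use_past=False)
# tokenizer = AutoTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
# dummy_inputs = onnx_config.generate_dummy_inputs(
#     tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
# )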
| 359 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
lowerCamelCase_ = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
"emoji": True,
},
}
]
lowerCamelCase_ = 0
for log in Path().glob("*.log"):
lowerCamelCase_ = 0
with open(log, "r") as f:
for line in f:
lowerCamelCase_ = json.loads(line)
if line.get("nodeid", "") != "":
lowerCamelCase_ = line["nodeid"]
if line.get("duration", None) is not None:
lowerCamelCase_ = f'{line["duration"]:.4f}'
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase_ = []
log.unlink()
lowerCamelCase_ = ""
lowerCamelCase_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase_ = []
lowerCamelCase_ = {}
for test in failed_tests:
lowerCamelCase_ = test[0].split("::")
lowerCamelCase_ = data[0].split("/")[-1]
if data[0] not in filesafailed:
lowerCamelCase_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase_ = [test[0] for test in failed_table]
lowerCamelCase_ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase_ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase_ = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_0_0_0:
lowerCamelCase_ = "Too many failed tests, please see the full report in the Action results."
lowerCamelCase_ = len(err) + 1_0
lowerCamelCase_ = message[: 3_0_0_0 - offset] + f'\n...\n```\n{err}'
print(f'### {message}')
else:
lowerCamelCase_ = "No failed tests! 🤗"
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
lowerCamelCase_ = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
lowerCamelCase_ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
lowerCamelCase_ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
lowerCamelCase_ = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
lowerCamelCase_ = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
lowerCamelCase_ = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase_ = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase_ = row[0]
else:
lowerCamelCase_ = ""
lowerCamelCase_ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
        )
| 239 | 0 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int):
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
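# A small worked example (not in the original file) exercising the function above.
if __name__ == "__main__":
    example = [1, 3, 5, 9, 2, 4]
    assert find_max(example, 0, len(example) - 1) == 9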
| 87 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Dict ) -> None:
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 107 | 0 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception):
    _statements = [
        'CUDA out of memory.', # CUDA OOM
        'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.', # CUDNN SNAFU
        'DefaultCPUAllocator: can\'t allocate memory', # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size
    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ', '.join([F"""{arg}={value}""" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""")
        while True:
            if batch_size == 0:
                raise RuntimeError('No executable batch size found, reached zero.')
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
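# A minimal usage sketch (not part of the original module): decorate a training
# function whose *first* parameter is the batch size; on OOM-style errors the call
# is retried with the batch size halved. `train_one_epoch`, `model`, and `dataset`
# are illustrative assumptions.
#
# @find_executable_batch_size(starting_batch_size=64)
# def train_one_epoch(batch_size, model, dataset):
#     ...  # build a dataloader with `batch_size` and run the epoch
#
# train_one_epoch(model, dataset)  # batch_size is supplied by the decorator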
| 207 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False):
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(F"""{max_subarray_sum(nums) = }""")
| 207 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp: int):
"""simple docstring"""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def is_chinese(word: str):
    """simple docstring"""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    # mark every character after the first as a subword continuation
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
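# A small worked example (not in the original file): with '中国' in the segmented
# word set, the continuation character is rewritten as a '##' piece:
# add_sub_symbol(['中', '国', '人'], {'中国'}) -> ['中', '##国', '人']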
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """simple docstring"""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """simple docstring"""
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
    main(args)
| 331 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase :Any = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple:
super().__init__(**_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] , _A : Union[str, List[str], "Image", List["Image"]] , **_A : Dict ) -> Dict:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]:
__magic_name__ : str = {}
if "candidate_labels" in kwargs:
__magic_name__ : str = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
__magic_name__ : Tuple = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __lowerCAmelCase ( self : str , _A : Dict , _A : Optional[Any]=None , _A : int="This is a photo of {}." ) -> int:
__magic_name__ : Dict = load_image(_A )
__magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
__magic_name__ : Optional[Any] = candidate_labels
__magic_name__ : List[Any] = [hypothesis_template.format(_A ) for x in candidate_labels]
__magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A )
__magic_name__ : Optional[Any] = [text_inputs]
return inputs
def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str:
__magic_name__ : str = model_inputs.pop('candidate_labels' )
__magic_name__ : str = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , _A ):
__magic_name__ : Dict = text_inputs[0]
else:
# Batching case.
__magic_name__ : Optional[Any] = text_inputs[0][0]
__magic_name__ : List[Any] = self.model(**_A , **_A )
__magic_name__ : str = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]:
__magic_name__ : Tuple = model_outputs.pop('candidate_labels' )
__magic_name__ : Union[str, Any] = model_outputs['logits'][0]
if self.framework == "pt":
__magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 )
__magic_name__ : Tuple = probs.tolist()
if not isinstance(_A , _A ):
__magic_name__ : Any = [scores]
elif self.framework == "tf":
__magic_name__ : Any = stable_softmax(_A , axis=-1 )
__magic_name__ : Dict = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__magic_name__ : Union[str, Any] = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(_A , _A ) , key=lambda x : -x[0] )
]
        return result
| 331 | 1 |
"""simple docstring"""
class a :
def __init__( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=None ):
_UpperCAmelCase = data
_UpperCAmelCase = previous
_UpperCAmelCase = next_node
def __str__( self : Optional[Any] ):
return f'''{self.data}'''
def lowerCAmelCase_ ( self : Any ):
return self.data
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.next
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.previous
class a :
def __init__( self : str , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = head
def __iter__( self : Dict ):
return self
def lowerCAmelCase_ ( self : Optional[int] ):
if not self.current:
raise StopIteration
else:
_UpperCAmelCase = self.current.get_data()
_UpperCAmelCase = self.current.get_next()
return value
class a :
def __init__( self : Tuple ):
_UpperCAmelCase = None # First node in list
_UpperCAmelCase = None # Last node in list
def __str__( self : Dict ):
_UpperCAmelCase = self.head
_UpperCAmelCase = []
while current is not None:
nodes.append(current.get_data() )
_UpperCAmelCase = current.get_next()
return " ".join(str(__lowerCAmelCase ) for node in nodes )
def __contains__( self : Optional[int] , __lowerCAmelCase : int ):
_UpperCAmelCase = self.head
while current:
if current.get_data() == value:
return True
_UpperCAmelCase = current.get_next()
return False
def __iter__( self : str ):
return LinkedListIterator(self.head )
def lowerCAmelCase_ ( self : Any ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase_ ( self : str ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Node ):
if self.head is None:
_UpperCAmelCase = node
_UpperCAmelCase = node
else:
self.insert_before_node(self.head , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Node ):
if self.head is None:
self.set_head(__lowerCAmelCase )
else:
self.insert_after_node(self.tail , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = Node(__lowerCAmelCase )
if self.head is None:
self.set_head(__lowerCAmelCase )
else:
self.set_tail(__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ):
_UpperCAmelCase = node
_UpperCAmelCase = node.previous
if node.get_previous() is None:
_UpperCAmelCase = node_to_insert
else:
_UpperCAmelCase = node_to_insert
_UpperCAmelCase = node_to_insert
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ):
_UpperCAmelCase = node
_UpperCAmelCase = node.next
if node.get_next() is None:
_UpperCAmelCase = node_to_insert
else:
_UpperCAmelCase = node_to_insert
_UpperCAmelCase = node_to_insert
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
_UpperCAmelCase = 1
_UpperCAmelCase = Node(__lowerCAmelCase )
_UpperCAmelCase = self.head
while node:
if current_position == position:
self.insert_before_node(__lowerCAmelCase , __lowerCAmelCase )
return
current_position += 1
_UpperCAmelCase = node.next
self.insert_after_node(self.tail , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = self.head
while node:
if node.get_data() == item:
return node
_UpperCAmelCase = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] ):
if (node := self.get_node(__lowerCAmelCase )) is not None:
if node == self.head:
_UpperCAmelCase = self.head.get_next()
if node == self.tail:
_UpperCAmelCase = self.tail.get_previous()
self.remove_node_pointers(__lowerCAmelCase )
@staticmethod
def lowerCAmelCase_ ( __lowerCAmelCase : Node ):
if node.get_next():
_UpperCAmelCase = node.previous
if node.get_previous():
_UpperCAmelCase = node.next
_UpperCAmelCase = None
_UpperCAmelCase = None
def lowerCAmelCase_ ( self : Dict ):
return self.head is None
def __UpperCAmelCase ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
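# A usage sketch (not in the original file). The insertion helpers above lost their
# distinct names to obfuscation (they are all `lowerCAmelCase_`), so these calls are
# not runnable as written; `insert` below is the assumed original helper that wraps
# a value in a Node and appends it via set_head/set_tail.
#
# linked_list = LinkedList()
# for value in (3, 1, 4):
#     linked_list.insert(value)      # assumed name
# print(linked_list)                 # __str__ above -> '3 1 4'
# print(3 in linked_list)            # __contains__ above -> True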
| 354 |
"""simple docstring"""
def reverse_words(input_str: str):
    """simple docstring"""
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30 | 0 |
def molarity_to_normality(nfactor: int, moles: float, volume: float):
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float):
    return round(float((moles * 0.0_821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float):
    return round(float((moles * 0.0_821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float):
    return round(float((pressure * volume) / (0.0_821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
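# A quick worked example (not in the original file): 2 mol of an ideal gas at
# 300 K in a 10 L vessel -> round((2 * 0.0821 * 300) / 10) = 5 atm.
if __name__ == "__main__":
    print(moles_to_pressure(volume=10, moles=2, temperature=300))  # 5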
| 240 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
    print('Making key files...')
    make_key_files('rsa', 1_0_2_4)
    print('Key files generation successful.')
def generate_key(key_size: int):
    print('Generating prime p...')
    p = rabinMiller.generate_large_prime(key_size)
    print('Generating prime q...')
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q
    print('Generating e that is relatively prime to (p - 1) * (q - 1)...')
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break
    print('Calculating d that is mod inverse of e...')
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int):
    if os.path.exists(F'{name}_pubkey.txt') or os.path.exists(F'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(F'\nWriting public key to file {name}_pubkey.txt...')
    with open(F'{name}_pubkey.txt', 'w') as out_file:
        out_file.write(F'{key_size},{public_key[0]},{public_key[1]}')
    print(F'Writing private key to file {name}_privkey.txt...')
    with open(F'{name}_privkey.txt', 'w') as out_file:
        out_file.write(F'{key_size},{private_key[0]},{private_key[1]}')
main()
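# A short sketch (not in the original file) of textbook RSA with a generated pair,
# for an integer message m < n: c = pow(m, e, n) and m = pow(c, d, n).
#
# public_key, private_key = generate_key(1_024)
# n, e = public_key
# _, d = private_key
# message = 424_242
# assert pow(pow(message, e, n), d, n) == message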
| 240 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
lowerCAmelCase__ = logging.get_logger(__name__)
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase__ : int , lowercase__ : int , lowercase__ : float , **lowercase__ : Any):
'''simple docstring'''
lowerCAmelCase__ = feature_size
lowerCAmelCase__ = sampling_rate
lowerCAmelCase__ = padding_value
lowerCAmelCase__ = kwargs.pop('padding_side' , 'right')
lowerCAmelCase__ = kwargs.pop('return_attention_mask' , lowercase__)
super().__init__(**lowercase__)
def __snake_case ( self : Union[str, Any] , lowercase__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , lowercase__ : Union[bool, str, PaddingStrategy] = True , lowercase__ : Optional[int] = None , lowercase__ : bool = False , lowercase__ : Optional[int] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(lowercase__ , (list, tuple)) and isinstance(processed_features[0] , (dict, BatchFeature)):
lowerCAmelCase__ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys())}""")
lowerCAmelCase__ = processed_features[self.model_input_names[0]]
lowerCAmelCase__ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase__) == 0:
if return_attention_mask:
lowerCAmelCase__ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
lowerCAmelCase__ = required_input[0]
if isinstance(lowercase__ , (list, tuple)):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
lowerCAmelCase__ = 0
while len(required_input[index]) == 0:
index += 1
if index < len(lowercase__):
lowerCAmelCase__ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase__):
lowerCAmelCase__ = 'tf'
elif is_torch_tensor(lowercase__):
lowerCAmelCase__ = 'pt'
elif isinstance(lowercase__ , (int, float, list, tuple, np.ndarray)):
lowerCAmelCase__ = 'np'
else:
raise ValueError(
F"""type of {first_element} unknown: {type(lowercase__)}. """
'Should be one of a python, numpy, pytorch or tensorflow object.')
for key, value in processed_features.items():
if isinstance(value[0] , (int, float)):
lowerCAmelCase__ = to_numpy(lowercase__)
else:
lowerCAmelCase__ = [to_numpy(lowercase__) for v in value]
# Convert padding_strategy in PaddingStrategy
lowerCAmelCase__ = self._get_padding_strategies(padding=lowercase__ , max_length=lowercase__)
lowerCAmelCase__ = processed_features[self.model_input_names[0]]
lowerCAmelCase__ = len(lowercase__)
if not all(len(lowercase__) == batch_size for v in processed_features.values()):
raise ValueError('Some items in the output dictionary have a different batch size than others.')
lowerCAmelCase__ = []
for i in range(lowercase__):
lowerCAmelCase__ = {k: v[i] for k, v in processed_features.items()}
# truncation
lowerCAmelCase__ = self._truncate(
lowercase__ , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , truncation=lowercase__ , )
truncated_inputs.append(lowercase__)
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
lowerCAmelCase__ = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
lowerCAmelCase__ = PaddingStrategy.MAX_LENGTH
lowerCAmelCase__ = {}
for i in range(lowercase__):
# padding
lowerCAmelCase__ = self._pad(
truncated_inputs[i] , max_length=lowercase__ , padding_strategy=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=lowercase__ , )
for key, value in outputs.items():
if key not in batch_outputs:
lowerCAmelCase__ = []
if value.dtype is np.dtype(np.floataa):
lowerCAmelCase__ = value.astype(np.floataa)
batch_outputs[key].append(lowercase__)
return BatchFeature(lowercase__ , tensor_type=lowercase__)
def __snake_case ( self : List[str] , lowercase__ : Union[Dict[str, np.ndarray], BatchFeature] , lowercase__ : Optional[int] = None , lowercase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowercase__ : Optional[int] = None , lowercase__ : Optional[bool] = None , ):
'''simple docstring'''
lowerCAmelCase__ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
lowerCAmelCase__ = len(lowercase__)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowerCAmelCase__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowerCAmelCase__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase__) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
lowerCAmelCase__ = np.ones(len(lowercase__) , dtype=np.intaa)
if needs_to_be_padded:
lowerCAmelCase__ = max_length - len(lowercase__)
if self.padding_side == "right":
if return_attention_mask:
lowerCAmelCase__ = np.pad(
processed_features['attention_mask'] , (0, difference))
lowerCAmelCase__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
lowerCAmelCase__ = np.pad(
lowercase__ , lowercase__ , 'constant' , constant_values=self.padding_value)
elif self.padding_side == "left":
if return_attention_mask:
lowerCAmelCase__ = np.pad(
processed_features['attention_mask'] , (difference, 0))
lowerCAmelCase__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
lowerCAmelCase__ = np.pad(
lowercase__ , lowercase__ , 'constant' , constant_values=self.padding_value)
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side))
return processed_features
def __snake_case ( self : Union[str, Any] , lowercase__ : Union[Dict[str, np.ndarray], BatchFeature] , lowercase__ : Optional[int] = None , lowercase__ : Optional[int] = None , lowercase__ : Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.')
lowerCAmelCase__ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowerCAmelCase__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowerCAmelCase__ = len(lowercase__) > max_length
if needs_to_be_truncated:
lowerCAmelCase__ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
lowerCAmelCase__ = processed_features['attention_mask'][:max_length]
return processed_features
def __snake_case ( self : int , lowercase__ : Dict=False , lowercase__ : Union[str, Any]=None):
'''simple docstring'''
if padding is not False:
if padding is True:
lowerCAmelCase__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase__ , lowercase__):
lowerCAmelCase__ = PaddingStrategy(lowercase__)
elif isinstance(lowercase__ , lowercase__):
lowerCAmelCase__ = padding
else:
lowerCAmelCase__ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""")
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.')
return padding_strategy
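# A minimal sketch (not part of the original module) of how the padding logic above
# is exercised upstream, where the class `a_` corresponds to transformers'
# SequenceFeatureExtractor and the first `__snake_case` method is named `pad`.
# The toy subclass and its inputs are illustrative assumptions.
#
# class ToyExtractor(SequenceFeatureExtractor):
#     model_input_names = ['input_values']
#
# extractor = ToyExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
# batch = BatchFeature({'input_values': [[0.1, 0.2, 0.3], [0.4]]})
# padded = extractor.pad(batch, padding='longest', return_attention_mask=True)
# # padded['input_values'] -> shape (2, 3); padded['attention_mask'] -> [[1, 1, 1], [1, 0, 0]]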
| 119 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = ['image_processor']
UpperCAmelCase_ = 'SamImageProcessor'
def __init__( self : Tuple , lowercase__ : Dict):
'''simple docstring'''
super().__init__(lowercase__)
lowerCAmelCase__ = self.image_processor
lowerCAmelCase__ = -10
lowerCAmelCase__ = self.image_processor.size['longest_edge']
def __call__( self : List[Any] , lowercase__ : Optional[int]=None , lowercase__ : Any=None , lowercase__ : Tuple=None , lowercase__ : List[str]=None , lowercase__ : Optional[Union[str, TensorType]] = None , **lowercase__ : Dict , ):
'''simple docstring'''
lowerCAmelCase__ = self.image_processor(
lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
# pop arguments that are not used in the foward but used nevertheless
lowerCAmelCase__ = encoding_image_processor['original_sizes']
if hasattr(lowercase__ , 'numpy'): # Checks if Torch or TF tensor
lowerCAmelCase__ = original_sizes.numpy()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._check_and_preprocess_points(
input_points=lowercase__ , input_labels=lowercase__ , input_boxes=lowercase__ , )
lowerCAmelCase__ = self._normalize_and_convert(
lowercase__ , lowercase__ , input_points=lowercase__ , input_labels=lowercase__ , input_boxes=lowercase__ , return_tensors=lowercase__ , )
return encoding_image_processor
def __snake_case ( self : Optional[Any] , lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : str=None , lowercase__ : Optional[int]=None , lowercase__ : str=None , lowercase__ : Optional[Any]="pt" , ):
'''simple docstring'''
if input_points is not None:
if len(lowercase__) != len(lowercase__):
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size , lowercase__ , original_sizes[0]) for point in input_points
]
else:
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size , lowercase__ , lowercase__)
for point, original_size in zip(lowercase__ , lowercase__)
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points):
if input_labels is not None:
lowerCAmelCase__ , lowerCAmelCase__ = self._pad_points_and_labels(lowercase__ , lowercase__)
lowerCAmelCase__ = np.array(lowercase__)
if input_labels is not None:
lowerCAmelCase__ = np.array(lowercase__)
if input_boxes is not None:
if len(lowercase__) != len(lowercase__):
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size , lowercase__ , original_sizes[0] , is_bounding_box=lowercase__)
for box in input_boxes
]
else:
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size , lowercase__ , lowercase__ , is_bounding_box=lowercase__)
for box, original_size in zip(lowercase__ , lowercase__)
]
lowerCAmelCase__ = np.array(lowercase__)
if input_boxes is not None:
if return_tensors == "pt":
lowerCAmelCase__ = torch.from_numpy(lowercase__)
# boxes batch size of 1 by default
lowerCAmelCase__ = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
elif return_tensors == "tf":
lowerCAmelCase__ = tf.convert_to_tensor(lowercase__)
# boxes batch size of 1 by default
lowerCAmelCase__ = tf.expand_dims(lowercase__ , 1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes})
if input_points is not None:
if return_tensors == "pt":
lowerCAmelCase__ = torch.from_numpy(lowercase__)
# point batch size of 1 by default
lowerCAmelCase__ = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
elif return_tensors == "tf":
lowerCAmelCase__ = tf.convert_to_tensor(lowercase__)
# point batch size of 1 by default
lowerCAmelCase__ = tf.expand_dims(lowercase__ , 1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({'input_points': input_points})
if input_labels is not None:
if return_tensors == "pt":
lowerCAmelCase__ = torch.from_numpy(lowercase__)
# point batch size of 1 by default
lowerCAmelCase__ = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
elif return_tensors == "tf":
lowerCAmelCase__ = tf.convert_to_tensor(lowercase__)
# point batch size of 1 by default
lowerCAmelCase__ = tf.expand_dims(lowercase__ , 1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels})
return encoding_image_processor
def __snake_case ( self : str , lowercase__ : Optional[int] , lowercase__ : Optional[Any]):
'''simple docstring'''
lowerCAmelCase__ = max([point.shape[0] for point in input_points])
lowerCAmelCase__ = []
for i, point in enumerate(lowercase__):
if point.shape[0] != expected_nb_points:
lowerCAmelCase__ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
lowerCAmelCase__ = np.append(input_labels[i] , [self.point_pad_value])
processed_input_points.append(lowercase__)
lowerCAmelCase__ = processed_input_points
return input_points, input_labels
def __snake_case ( self : Optional[Any] , lowercase__ : int , lowercase__ : np.ndarray , lowercase__ : int , lowercase__ : Optional[Any]=False):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = original_size
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor._get_preprocess_shape(lowercase__ , longest_edge=lowercase__)
lowerCAmelCase__ = deepcopy(lowercase__).astype(lowercase__)
if is_bounding_box:
lowerCAmelCase__ = coords.reshape(-1 , 2 , 2)
lowerCAmelCase__ = coords[..., 0] * (new_w / old_w)
lowerCAmelCase__ = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowerCAmelCase__ = coords.reshape(-1 , 4)
return coords
def __snake_case ( self : Dict , lowercase__ : Optional[Any]=None , lowercase__ : Tuple=None , lowercase__ : int=None , ):
'''simple docstring'''
if input_points is not None:
if hasattr(lowercase__ , 'numpy'): # Checks for TF or Torch tensor
lowerCAmelCase__ = input_points.numpy().tolist()
if not isinstance(lowercase__ , lowercase__) or not isinstance(input_points[0] , lowercase__):
            raise ValueError('Input points must be a list of lists of floating point numbers.')
lowerCAmelCase__ = [np.array(lowercase__) for input_point in input_points]
else:
lowerCAmelCase__ = None
if input_labels is not None:
if hasattr(lowercase__ , 'numpy'):
lowerCAmelCase__ = input_labels.numpy().tolist()
if not isinstance(lowercase__ , lowercase__) or not isinstance(input_labels[0] , lowercase__):
            raise ValueError('Input labels must be a list of lists of integers.')
lowerCAmelCase__ = [np.array(lowercase__) for label in input_labels]
else:
lowerCAmelCase__ = None
if input_boxes is not None:
if hasattr(lowercase__ , 'numpy'):
lowerCAmelCase__ = input_boxes.numpy().tolist()
if (
not isinstance(lowercase__ , lowercase__)
or not isinstance(input_boxes[0] , lowercase__)
or not isinstance(input_boxes[0][0] , lowercase__)
):
                raise ValueError('Input boxes must be a list of lists of lists of floating point numbers.')
lowerCAmelCase__ = [np.array(lowercase__).astype(np.floataa) for box in input_boxes]
else:
lowerCAmelCase__ = None
return input_points, input_labels, input_boxes
@property
def __snake_case ( self : List[Any]):
'''simple docstring'''
lowerCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase__))
def __snake_case ( self : int , *lowercase__ : int , **lowercase__ : int):
'''simple docstring'''
return self.image_processor.post_process_masks(*lowercase__ , **lowercase__)
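# Illustrative re-implementation of the coordinate rescaling above: points in
# the original image frame are mapped into the resized longest-edge frame.
# The helper names and the 1024 default are assumptions for this sketch, not
# the processor's actual API.
import numpy as np


def get_preprocess_shape(old_h, old_w, longest_edge):
    scale = longest_edge / max(old_h, old_w)
    return int(old_h * scale + 0.5), int(old_w * scale + 0.5)


def rescale_coordinates(coords, original_size, longest_edge=1024):
    old_h, old_w = original_size
    new_h, new_w = get_preprocess_shape(old_h, old_w, longest_edge)
    coords = coords.astype(float).copy()
    coords[..., 0] *= new_w / old_w  # x
    coords[..., 1] *= new_h / old_h  # y
    return coords


print(rescale_coordinates(np.array([[100.0, 200.0]]), original_size=(600, 800)))
# [[128. 256.]] -- an 800px-wide image is scaled so its longest edge is 1024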
| 119 | 1 |
def mf_knapsack(i, wt, val, j):
    """Memoized (top-down) 0/1 knapsack: best value using the first i items with capacity j."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also recover one optimal subset of item indices (1-based)."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples")
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Walk the dp table backwards, collecting the items that were taken."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 187 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowercase__ =logging.get_logger(__name__)
lowercase__ ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ ={
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
lowercase__ ={
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
lowercase__ ={
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE : Optional[int] = RoFormerTokenizer
def __init__(self : List[str] , snake_case_ : Optional[int]=None , snake_case_ : str=None , snake_case_ : Optional[Any]=True , snake_case_ : str="[UNK]" , snake_case_ : Dict="[SEP]" , snake_case_ : Any="[PAD]" , snake_case_ : str="[CLS]" , snake_case_ : List[Any]="[MASK]" , snake_case_ : Any=True , snake_case_ : List[str]=None , **snake_case_ : Optional[int] , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__a : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , snake_case_ ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , snake_case_ ) != strip_accents
):
__a : List[str] = getattr(snake_case_ , pre_tok_state.pop('''type''' ) )
__a : Optional[Any] = do_lower_case
__a : Optional[int] = strip_accents
__a : List[str] = pre_tok_class(**snake_case_ )
__a : Optional[Any] = do_lower_case
def __getstate__(self : Union[str, Any] ):
__a : Any = self.__dict__.copy()
__a : Union[str, Any] = BertPreTokenizer()
return state
def __setstate__(self : Tuple , snake_case_ : Optional[Any] ):
__a : Dict = d
__a : str = self.__dict__['''_tokenizer'''].get_vocab()
__a : Optional[Any] = PreTokenizer.custom(JiebaPreTokenizer(snake_case_ ) )
def lowerCAmelCase (self : Optional[int] , snake_case_ : List[Any] , snake_case_ : Optional[Any]=None ):
__a : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase (self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
__a : int = [self.sep_token_id]
__a : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase (self : int , snake_case_ : str , snake_case_ : Optional[str] = None ):
__a : Optional[Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def lowerCAmelCase (self : Dict , snake_case_ : Dict , snake_case_ : Tuple=None , snake_case_ : Optional[Any]=None , snake_case_ : Union[str, Any]=False , **snake_case_ : Tuple , ):
__a : List[str] = BertPreTokenizer()
return super().save_pretrained(snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
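# A standalone sketch of the sequence-pair layout the two helpers above
# produce ([CLS] A [SEP] B [SEP] with segment ids 0/1); the token ids are
# made up, only the structure matters.
cls_id, sep_id = 101, 102
tokens_a = [7, 8, 9]
tokens_b = [10, 11]

input_ids = [cls_id] + tokens_a + [sep_id] + tokens_b + [sep_id]
token_type_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)

assert len(input_ids) == len(token_type_ids)
print(input_ids)       # [101, 7, 8, 9, 102, 10, 11, 102]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]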
| 216 | 0 |
def prime_sieve_eratosthenes(num):
    """Return the list of primes up to and including num (sieve of Eratosthenes)."""
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
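# A quick sanity check for the sieve above (all primes up to 30):
assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]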
| 354 |
import base64


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string with standard Base64."""
    return base64.b64encode(string.encode('utf-8'))


def base64_decode(encoded: bytes) -> str:
    """Decode standard Base64 back to a UTF-8 string."""
    return base64.b64decode(encoded).decode('utf-8')


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
| 14 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ = logging.get_logger(__name__)
A_ = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class _snake_case ( A__ , A__ ):
_A : str = '''resnet'''
_A : List[Any] = ['''basic''', '''bottleneck''']
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=64 ,SCREAMING_SNAKE_CASE__ : Any=[256, 512, 1_024, 2_048] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=[3, 4, 6, 3] ,SCREAMING_SNAKE_CASE__ : int="bottleneck" ,SCREAMING_SNAKE_CASE__ : str="relu" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
super().__init__(**UpperCamelCase_ )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
SCREAMING_SNAKE_CASE:Optional[Any] = num_channels
SCREAMING_SNAKE_CASE:int = embedding_size
SCREAMING_SNAKE_CASE:Dict = hidden_sizes
SCREAMING_SNAKE_CASE:int = depths
SCREAMING_SNAKE_CASE:int = layer_type
SCREAMING_SNAKE_CASE:List[Any] = hidden_act
SCREAMING_SNAKE_CASE:Optional[int] = downsample_in_first_stage
SCREAMING_SNAKE_CASE:Tuple = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 ,len(UpperCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE:Tuple = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ ,out_indices=UpperCamelCase_ ,stage_names=self.stage_names )
class _snake_case ( A__ ):
_A : List[Any] = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : List[str] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __UpperCamelCase ( self : List[str] ):
return 1e-3
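# A simplified sketch of what get_aligned_output_features_output_indices does
# for the backbone mixin above (an assumption about its behaviour, not the
# real helper): default to the last stage, and keep names and indices in sync.
def align_output_features(stage_names, out_features=None, out_indices=None):
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    if out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices


stages = ["stem", "stage1", "stage2", "stage3", "stage4"]
print(align_output_features(stages, out_features=["stage2", "stage4"]))
# (['stage2', 'stage4'], [2, 4])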
| 139 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( __a ) -> Union[str, Any]:
'''simple docstring'''
return json.load(__a )
def a ( __a ) -> int:
'''simple docstring'''
return [json.loads(__a ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
        assert exported_content == original_content
| 97 | 0 |
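# What the `orient` parametrizations above are asserting, shown on a tiny
# dataset with plain json (no `datasets` dependency); the shapes follow the
# pandas `to_json` conventions that the asserted key sets imply.
import json

rows = [{"tokens": "a", "id": 0}, {"tokens": "b", "id": 1}]

records = rows                        # orient="records": a list of row dicts
split = {
    "columns": ["tokens", "id"],      # orient="split": column names + row data
    "data": [["a", 0], ["b", 1]],
}

print(json.dumps(records))
print(json.dumps(split))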
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Optional[int]:
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")
self.register_modules(
speech_model=__UpperCAmelCase , speech_processor=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , )
def UpperCAmelCase__ ( self , __UpperCAmelCase = "auto") ->int:
if slice_size == "auto":
a_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
self.enable_attention_slicing(__UpperCAmelCase)
@torch.no_grad()
def __call__( self , __UpperCAmelCase , __UpperCAmelCase=1_60_00 , __UpperCAmelCase = 5_12 , __UpperCAmelCase = 5_12 , __UpperCAmelCase = 50 , __UpperCAmelCase = 7.5 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 , **__UpperCAmelCase , ) ->int:
a_ = self.speech_processor.feature_extractor(
__UpperCAmelCase , return_tensors="pt" , sampling_rate=__UpperCAmelCase).input_features.to(self.device)
a_ = self.speech_model.generate(__UpperCAmelCase , max_length=48_00_00)
a_ = self.speech_processor.tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , normalize=__UpperCAmelCase)[
0
]
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = 1
elif isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = len(__UpperCAmelCase)
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase)}''')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(__UpperCAmelCase)}.''')
# get prompt text embeddings
a_ = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
a_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
a_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''')
a_ = text_input_ids[:, : self.tokenizer.model_max_length]
a_ = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
a_ , a_ , a_ = text_embeddings.shape
a_ = text_embeddings.repeat(1 , __UpperCAmelCase , 1)
a_ = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCAmelCase , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a_ = 42
if negative_prompt is None:
a_ = [""] * batch_size
elif type(__UpperCAmelCase) is not type(__UpperCAmelCase):
raise TypeError(
                    F'''`negative_prompt` should be the same type as `prompt`, but got {type(__UpperCAmelCase)} !='''
F''' {type(__UpperCAmelCase)}.''')
elif isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = [negative_prompt]
elif batch_size != len(__UpperCAmelCase):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(__UpperCAmelCase)}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`.")
else:
a_ = negative_prompt
a_ = text_input_ids.shape[-1]
a_ = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , )
a_ = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
a_ = uncond_embeddings.shape[1]
a_ = uncond_embeddings.repeat(1 , __UpperCAmelCase , 1)
a_ = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a_ = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
a_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
a_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
a_ = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device="cpu" , dtype=__UpperCAmelCase).to(
self.device)
else:
a_ = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase)
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
a_ = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(__UpperCAmelCase)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
a_ = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
a_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a_ = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
a_ = {}
if accepts_eta:
a_ = eta
for i, t in enumerate(self.progress_bar(__UpperCAmelCase)):
# expand the latents if we are doing classifier free guidance
a_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
a_ = self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase)
# predict the noise residual
a_ = self.unet(__UpperCAmelCase , __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase).sample
# perform guidance
if do_classifier_free_guidance:
a_ , a_ = noise_pred.chunk(2)
a_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
a_ = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
a_ = 1 / 0.18_215 * latents
a_ = self.vae.decode(__UpperCAmelCase).sample
a_ = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
a_ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
a_ = self.numpy_to_pil(__UpperCAmelCase)
if not return_dict:
return image
        return StableDiffusionPipelineOutput(images=__UpperCAmelCase , nsfw_content_detected=__UpperCAmelCase)
| 303 |
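# The classifier-free guidance step from the pipeline above, isolated; the
# tensor shapes are illustrative (batch of 2 = [unconditional, text-conditioned]).
import torch

noise_pred = torch.randn(2, 4, 8, 8)
guidance_scale = 7.5

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 4, 8, 8])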
"""simple docstring"""
import os
import numpy
import onnx
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = a.name
a_ = b.name
a_ = ""
a_ = ""
a_ = a == b
a_ = name_a
a_ = name_b
return res
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase , UpperCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = list(model.graph.initializer )
a_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
a_ = inits[i].name
a_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = os.path.dirname(UpperCAmelCase )
a_ = os.path.basename(UpperCAmelCase )
a_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
a_ = list(model.graph.initializer )
a_ = set()
a_ = {}
a_ = []
a_ = 0
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
a_ = inits[j].data_type
a_ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , UpperCAmelCase )
total_reduced_size += mem_size
a_ = inits[i].name
a_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
a_ = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" )
a_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = "optimized_" + model_file_name
a_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
    return new_model
| 303 | 1 |
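# The name-blanking equality trick from the ONNX deduplication above, shown
# on a stand-in class: two tensor protos count as duplicates if they compare
# equal once their names are temporarily cleared. FakeTensorProto is an
# assumption standing in for onnx.TensorProto.
from dataclasses import dataclass


@dataclass
class FakeTensorProto:
    name: str
    dims: tuple = ()
    raw_data: bytes = b""


def is_equal_ignoring_name(a, b):
    name_a, name_b = a.name, b.name
    a.name = b.name = ""
    res = a == b
    a.name, b.name = name_a, name_b
    return res


x = FakeTensorProto("weight_1", (2, 2), b"\x00" * 16)
y = FakeTensorProto("weight_2", (2, 2), b"\x00" * 16)
assert is_equal_ignoring_name(x, y) and x.name != y.name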
"""simple docstring"""
def different_signs(numa: int, numb: int) -> bool:
    """Return True when the two integers have opposite signs: the XOR of two
    ints is negative exactly when their sign bits differ."""
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 238 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021E-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
        raise ValueError('''Mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
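# Quick usage check for the solver above: exactly one of the three quantities
# is passed as 0 (the unknown to solve for); the other two are supplied.
print(electric_conductivity(conductivity=0, electron_conc=1e20, mobility=0.01))
# -> ('conductivity', ~0.16021)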
if __name__ == "__main__":
import doctest
doctest.testmod()
| 238 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''roberta-prelayernorm'''
def __init__( self : Optional[int] , _UpperCAmelCase : int=50265 , _UpperCAmelCase : Union[str, Any]=768 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Optional[int]=3072 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Tuple=1e-12 , _UpperCAmelCase : Optional[int]=1 , _UpperCAmelCase : str=0 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : int=True , _UpperCAmelCase : str=None , **_UpperCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowercase__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 367 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Minimum number of moves so that every node in the tree holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes should be the same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
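# Quick check of the solver above: a root holding 3 coins and two empty
# children needs exactly 2 moves (one coin pushed down to each child).
example_root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(example_root) == 2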
if __name__ == "__main__":
import doctest
doctest.testmod()
| 241 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : str ={
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
_A : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
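# A minimal sketch of the lazy-import pattern used by the __init__ above:
# attribute access triggers the submodule import on first use. This is a
# simplified assumption, not transformers' actual _LazyModule implementation.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")


lazy = MiniLazyModule("json", {"decoder": ["JSONDecoder"]})
print(lazy.JSONDecoder)  # json.decoder is only imported at this access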
| 41 |
'''simple docstring'''
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 41 | 1 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
__SCREAMING_SNAKE_CASE : Any = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
__SCREAMING_SNAKE_CASE : Dict = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
__SCREAMING_SNAKE_CASE : List[Any] = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
__SCREAMING_SNAKE_CASE : int = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
__SCREAMING_SNAKE_CASE : Optional[Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
__SCREAMING_SNAKE_CASE : Optional[Any] = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
__SCREAMING_SNAKE_CASE : str = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 363 |
"""simple docstring"""
import argparse
from collections import defaultdict
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : str , lowercase_ : str ) -> Optional[int]:
_lowerCamelCase = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(lowercase_ , '''r''' ) as f:
_lowerCamelCase = f.readlines()
_lowerCamelCase = F"""class {class_name}("""
_lowerCamelCase = F"""{4 * " "}def {test_name}("""
_lowerCamelCase = F"""{8 * " "}{correct_line.split()[0]}"""
_lowerCamelCase = F"""{16 * " "}{correct_line.split()[0]}"""
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = []
for line in lines:
if line.startswith(lowercase_ ):
_lowerCamelCase = True
elif in_class and line.startswith(lowercase_ ):
_lowerCamelCase = True
elif in_class and in_func and (line.startswith(lowercase_ ) or line.startswith(lowercase_ )):
_lowerCamelCase = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowerCamelCase = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowerCamelCase = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowerCamelCase = _lowerCamelCase = _lowerCamelCase = _lowerCamelCase = False
else:
new_lines.append(lowercase_ )
with open(lowercase_ , '''w''' ) as f:
for line in new_lines:
f.write(lowercase_ )
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Union[str, Any]=None ) -> Any:
if fail is not None:
with open(lowercase_ , '''r''' ) as f:
_lowerCamelCase = {l.strip() for l in f.readlines()}
else:
_lowerCamelCase = None
with open(lowercase_ , '''r''' ) as f:
_lowerCamelCase = f.readlines()
_lowerCamelCase = defaultdict(lowercase_ )
for line in correct_lines:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = line.split(''';''' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
main(args.correct_filename, args.fail_filename)
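# The expected line format in --correct_filename, per the split(";") above
# (the path, class and test names here are made up for illustration):
# file;class_name;test_name;correct_line
example = "tests/models/foo/test_modeling_foo.py;FooModelTester;test_forward;self.assertEqual(result.shape, expected)"
file, class_name, test_name, correct_line = example.split(";")
print(file, class_name, test_name, correct_line)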
| 73 | 0 |